Merge branch 'linus' into tracing/hw-branch-tracing

Merge reason: update to latest tracing and ptrace APIs

Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Ingo Molnar
2009-04-07 13:34:26 +02:00
parent c78a3956b9 d508afb437
commit 2e8844e13a
7804 changed files with 875568 additions and 324712 deletions

View file

@@ -67,11 +67,11 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-y += apic/
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
obj-$(CONFIG_X86_VSMP) += vsmp_64.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_MODULES) += module_$(BITS).o
obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
@@ -107,7 +107,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64
obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
###
# 64 bit specific files
@@ -121,4 +121,5 @@ ifeq ($(CONFIG_X86_64),y)
obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o
obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
obj-y += vsmp_64.o
endif

View file

@@ -229,6 +229,35 @@ static void __cpuinit acpi_register_lapic(int id, u8 enabled)
generic_processor_info(id, ver);
}
static int __init
acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
{
struct acpi_madt_local_x2apic *processor = NULL;
processor = (struct acpi_madt_local_x2apic *)header;
if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;
acpi_table_print_madt_entry(header);
#ifdef CONFIG_X86_X2APIC
/*
* We need to register disabled CPUs as well, to permit
* counting them. This allows us to size
* cpus_possible_map more accurately, so that we need not
* preallocate memory for all NR_CPUS
* when we use CPU hotplug.
*/
acpi_register_lapic(processor->local_apic_id, /* APIC ID */
processor->lapic_flags & ACPI_MADT_ENABLED);
#else
printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
#endif
return 0;
}
static int __init
acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
{
@@ -288,6 +317,25 @@ acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
return 0;
}
static int __init
acpi_parse_x2apic_nmi(struct acpi_subtable_header *header,
const unsigned long end)
{
struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL;
x2apic_nmi = (struct acpi_madt_local_x2apic_nmi *)header;
if (BAD_MADT_ENTRY(x2apic_nmi, end))
return -EINVAL;
acpi_table_print_madt_entry(header);
if (x2apic_nmi->lint != 1)
printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
return 0;
}
static int __init
acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
{
@@ -793,6 +841,7 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
static int __init acpi_parse_madt_lapic_entries(void)
{
int count;
int x2count = 0;
if (!cpu_has_apic)
return -ENODEV;
@@ -816,22 +865,28 @@ static int __init acpi_parse_madt_lapic_entries(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
acpi_parse_sapic, MAX_APICS);
if (!count)
if (!count) {
x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
acpi_parse_x2apic, MAX_APICS);
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
acpi_parse_lapic, MAX_APICS);
if (!count) {
}
if (!count && !x2count) {
printk(KERN_ERR PREFIX "No LAPIC entries present\n");
/* TBD: Cleanup to allow fallback to MPS */
return -ENODEV;
} else if (count < 0) {
} else if (count < 0 || x2count < 0) {
printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
x2count =
acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI,
acpi_parse_x2apic_nmi, 0);
count =
acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
if (count < 0) {
if (count < 0 || x2count < 0) {
printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
@@ -1470,7 +1525,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
/*
* If your system is blacklisted here, but you find that acpi=force
* works for you, please contact acpi-devel@sourceforge.net
* works for you, please contact linux-acpi@vger.kernel.org
*/
static struct dmi_system_id __initdata acpi_dmi_table[] = {
/*

View file

@@ -22,10 +22,9 @@
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>
#ifdef CONFIG_IOMMU_API
#include <linux/iommu.h>
#endif
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
@@ -1297,8 +1296,10 @@ static void __unmap_single(struct amd_iommu *iommu,
/*
* The exported map_single function for dma_ops.
*/
static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
size_t size, int dir)
static dma_addr_t map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
unsigned long flags;
struct amd_iommu *iommu;
@@ -1306,6 +1307,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
u16 devid;
dma_addr_t addr;
u64 dma_mask;
phys_addr_t paddr = page_to_phys(page) + offset;
INC_STATS_COUNTER(cnt_map_single);
@@ -1340,8 +1342,8 @@ out:
/*
* The exported unmap_single function for dma_ops.
*/
static void unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, int dir)
static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
unsigned long flags;
struct amd_iommu *iommu;
@@ -1390,7 +1392,8 @@ static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
* lists).
*/
static int map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, int dir)
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
unsigned long flags;
struct amd_iommu *iommu;
@@ -1457,7 +1460,8 @@ unmap:
* lists).
*/
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, int dir)
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
unsigned long flags;
struct amd_iommu *iommu;
@@ -1644,11 +1648,11 @@ static void prealloc_protection_domains(void)
}
}
static struct dma_mapping_ops amd_iommu_dma_ops = {
static struct dma_map_ops amd_iommu_dma_ops = {
.alloc_coherent = alloc_coherent,
.free_coherent = free_coherent,
.map_single = map_single,
.unmap_single = unmap_single,
.map_page = map_page,
.unmap_page = unmap_page,
.map_sg = map_sg,
.unmap_sg = unmap_sg,
.dma_supported = amd_iommu_dma_supported,
@@ -1924,6 +1928,12 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
return paddr;
}
static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap)
{
return 0;
}
static struct iommu_ops amd_iommu_ops = {
.domain_init = amd_iommu_domain_init,
.domain_destroy = amd_iommu_domain_destroy,
@@ -1932,5 +1942,6 @@ static struct iommu_ops amd_iommu_ops = {
.map = amd_iommu_map_range,
.unmap = amd_iommu_unmap_range,
.iova_to_phys = amd_iommu_iova_to_phys,
.domain_has_cap = amd_iommu_domain_has_cap,
};
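
The hunks above convert this driver from the old map_single()/unmap_single() callbacks to the cross-architecture struct dma_map_ops, whose entry points take a page and an offset instead of a raw physical address. A minimal sketch of the correspondence between the two call shapes; the helper name is illustrative and not part of the commit:

/* Sketch: a map_single-style call expressed through the new ->map_page.
 * The physical address splits into its page and the offset within it,
 * exactly inverting the paddr computation shown in the hunk above. */
static dma_addr_t sketch_map_single(struct device *dev, phys_addr_t paddr,
				    size_t size, enum dma_data_direction dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;

	/* NULL attrs: no special DMA attributes requested */
	return amd_iommu_dma_ops.map_page(dev, page, offset, size, dir, NULL);
}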

View file

@@ -809,7 +809,7 @@ void clear_local_APIC(void)
u32 v;
/* APIC hasn't been mapped yet */
if (!apic_phys)
if (!x2apic && !apic_phys)
return;
maxlvt = lapic_get_maxlvt();
@@ -1304,6 +1304,7 @@ void __init enable_IR_x2apic(void)
#ifdef CONFIG_INTR_REMAP
int ret;
unsigned long flags;
struct IO_APIC_route_entry **ioapic_entries = NULL;
if (!cpu_has_x2apic)
return;
@@ -1334,16 +1335,23 @@ void __init enable_IR_x2apic(void)
return;
}
local_irq_save(flags);
mask_8259A();
ioapic_entries = alloc_ioapic_entries();
if (!ioapic_entries) {
pr_info("Allocate ioapic_entries failed: %d\n", ret);
goto end;
}
ret = save_mask_IO_APIC_setup();
ret = save_IO_APIC_setup(ioapic_entries);
if (ret) {
pr_info("Saving IO-APIC state failed: %d\n", ret);
goto end;
}
ret = enable_intr_remapping(1);
local_irq_save(flags);
mask_IO_APIC_setup(ioapic_entries);
mask_8259A();
ret = enable_intr_remapping(EIM_32BIT_APIC_ID);
if (ret && x2apic_preenabled) {
local_irq_restore(flags);
@@ -1363,14 +1371,14 @@ end_restore:
/*
* IR enabling failed
*/
restore_IO_APIC_setup();
restore_IO_APIC_setup(ioapic_entries);
else
reinit_intr_remapped_IO_APIC(x2apic_preenabled);
reinit_intr_remapped_IO_APIC(x2apic_preenabled, ioapic_entries);
end:
unmask_8259A();
local_irq_restore(flags);
end:
if (!ret) {
if (!x2apic_preenabled)
pr_info("Enabled x2apic and interrupt-remapping\n");
@@ -1378,6 +1386,8 @@ end:
pr_info("Enabled Interrupt-remapping\n");
} else
pr_err("Failed to enable Interrupt-remapping and x2apic\n");
if (ioapic_entries)
free_ioapic_entries(ioapic_entries);
#else
if (!cpu_has_x2apic)
return;
@@ -1523,12 +1533,10 @@ void __init early_init_lapic_mapping(void)
*/
void __init init_apic_mappings(void)
{
#ifdef CONFIG_X86_X2APIC
if (x2apic) {
boot_cpu_physical_apicid = read_apic_id();
return;
}
#endif
/*
* If no local APIC can be found then set up a fake all
@@ -1955,6 +1963,10 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
local_irq_save(flags);
disable_local_APIC();
#ifdef CONFIG_INTR_REMAP
if (intr_remapping_enabled)
disable_intr_remapping();
#endif
local_irq_restore(flags);
return 0;
}
@@ -1965,19 +1977,42 @@ static int lapic_resume(struct sys_device *dev)
unsigned long flags;
int maxlvt;
#ifdef CONFIG_INTR_REMAP
int ret;
struct IO_APIC_route_entry **ioapic_entries = NULL;
if (!apic_pm_state.active)
return 0;
maxlvt = lapic_get_maxlvt();
local_irq_save(flags);
if (x2apic) {
ioapic_entries = alloc_ioapic_entries();
if (!ioapic_entries) {
WARN(1, "Alloc ioapic_entries in lapic resume failed.");
return -ENOMEM;
}
ret = save_IO_APIC_setup(ioapic_entries);
if (ret) {
WARN(1, "Saving IO-APIC state failed: %d\n", ret);
free_ioapic_entries(ioapic_entries);
return ret;
}
mask_IO_APIC_setup(ioapic_entries);
mask_8259A();
enable_x2apic();
}
#else
if (!apic_pm_state.active)
return 0;
local_irq_save(flags);
#ifdef CONFIG_X86_X2APIC
if (x2apic)
enable_x2apic();
else
#endif
{
else {
/*
* Make sure the APICBASE points to the right address
*
@@ -1990,6 +2025,7 @@ static int lapic_resume(struct sys_device *dev)
wrmsr(MSR_IA32_APICBASE, l, h);
}
maxlvt = lapic_get_maxlvt();
apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
apic_write(APIC_ID, apic_pm_state.apic_id);
apic_write(APIC_DFR, apic_pm_state.apic_dfr);
@@ -2013,8 +2049,20 @@ static int lapic_resume(struct sys_device *dev)
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
#ifdef CONFIG_INTR_REMAP
if (intr_remapping_enabled)
reenable_intr_remapping(EIM_32BIT_APIC_ID);
if (x2apic) {
unmask_8259A();
restore_IO_APIC_setup(ioapic_entries);
free_ioapic_entries(ioapic_entries);
}
#endif
local_irq_restore(flags);
return 0;
}
@@ -2052,7 +2100,9 @@ static int __init init_lapic_sysfs(void)
error = sysdev_register(&device_lapic);
return error;
}
device_initcall(init_lapic_sysfs);
/* local apic needs to resume before other devices access its registers. */
core_initcall(init_lapic_sysfs);
#else /* CONFIG_PM */
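
Condensed from the lapic_resume() hunk above: the x2apic branch performs a save/mask/switch/restore sequence so that no interrupt can arrive through a stale IO-APIC route while the APIC mode changes. A sketch of that ordering, with the WARN-based error handling trimmed:

static int x2apic_resume_sketch(void)
{
	struct IO_APIC_route_entry **entries;
	int ret;

	entries = alloc_ioapic_entries();	/* per-IOAPIC snapshot buffers */
	if (!entries)
		return -ENOMEM;

	ret = save_IO_APIC_setup(entries);	/* copy out every RTE */
	if (ret) {
		free_ioapic_entries(entries);
		return ret;
	}

	mask_IO_APIC_setup(entries);		/* mask every pin ... */
	mask_8259A();				/* ... and the legacy PIC */

	enable_x2apic();			/* switch modes with irqs quiesced */

	unmask_8259A();
	restore_IO_APIC_setup(entries);		/* write the saved RTEs back */
	free_ioapic_entries(entries);
	return 0;
}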

View file

@@ -159,20 +159,6 @@ static int flat_apic_id_registered(void)
return physid_isset(read_xapic_id(), phys_cpu_present_map);
}
static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
}
static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask)
{
unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS;
return mask1 & mask2;
}
static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
{
return hard_smp_processor_id() >> index_msb;
@@ -213,8 +199,8 @@ struct apic apic_flat = {
.set_apic_id = set_apic_id,
.apic_id_mask = 0xFFu << 24,
.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = flat_send_IPI_mask,
.send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,

View file

@@ -26,12 +26,12 @@ static int bigsmp_apic_id_registered(void)
return 1;
}
static const cpumask_t *bigsmp_target_cpus(void)
static const struct cpumask *bigsmp_target_cpus(void)
{
#ifdef CONFIG_SMP
return &cpu_online_map;
return cpu_online_mask;
#else
return &cpumask_of_cpu(0);
return cpumask_of(0);
#endif
}
@@ -118,9 +118,9 @@ static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
}
/* As we are using single CPU as destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
}
static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -188,10 +188,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
{ } /* NULL entry stops DMI scanning */
};
static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
cpus_clear(*retmask);
cpu_set(cpu, *retmask);
cpumask_clear(retmask);
cpumask_set_cpu(cpu, retmask);
}
static int probe_bigsmp(void)
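
The conversion running through this file (and through the es7000, numaq and summit files below) is mechanical: on-stack cpumask_t values and the cpus_*()/cpu_*() helpers become struct cpumask pointers with cpumask_*() helpers. A side-by-side sketch, illustrative only:

static void vector_domain_sketch(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);		/* was: cpus_clear(*retmask)   */
	cpumask_set_cpu(cpu, retmask);	/* was: cpu_set(cpu, *retmask) */
}

/* Likewise: first_cpu(*mask)   -> cpumask_first(mask)
 *           cpus_empty(*mask)  -> cpumask_empty(mask)
 *           &cpu_online_map    -> cpu_online_mask
 *           &cpumask_of_cpu(n) -> cpumask_of(n)
 */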

View file

@@ -410,7 +410,7 @@ static void es7000_enable_apic_mode(void)
WARN(1, "Command failed, status = %x\n", mip_status);
}
static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
static void es7000_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
/* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest
@@ -420,7 +420,8 @@ static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
* deliver interrupts to the wrong hyperthread when only one
* hyperthread was specified in the interrupt destination.
*/
*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
cpumask_clear(retmask);
cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}
@@ -455,14 +456,14 @@ static int es7000_apic_id_registered(void)
return 1;
}
static const cpumask_t *target_cpus_cluster(void)
static const struct cpumask *target_cpus_cluster(void)
{
return &CPU_MASK_ALL;
return cpu_all_mask;
}
static const cpumask_t *es7000_target_cpus(void)
static const struct cpumask *es7000_target_cpus(void)
{
return &cpumask_of_cpu(smp_processor_id());
return cpumask_of(smp_processor_id());
}
static unsigned long
@@ -517,7 +518,7 @@ static void es7000_setup_apic_routing(void)
"Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
(apic_version[apic] == 0x14) ?
"Physical Cluster" : "Logical Cluster",
nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
}
static int es7000_apicid_to_node(int logical_apicid)
@@ -572,7 +573,7 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
return 1;
}
static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
unsigned int round = 0;
int cpu, uninitialized_var(apicid);

View file

@@ -389,6 +389,8 @@ struct io_apic {
unsigned int index;
unsigned int unused[3];
unsigned int data;
unsigned int unused2[11];
unsigned int eoi;
};
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
@@ -397,6 +399,12 @@ static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
+ (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
}
static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
struct io_apic __iomem *io_apic = io_apic_base(apic);
writel(vector, &io_apic->eoi);
}
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
struct io_apic __iomem *io_apic = io_apic_base(apic);
@@ -546,16 +554,12 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
apic = entry->apic;
pin = entry->pin;
#ifdef CONFIG_INTR_REMAP
/*
* With interrupt-remapping, destination information comes
* from interrupt-remapping table entry.
*/
if (!irq_remapped(irq))
io_apic_write(apic, 0x11 + pin*2, dest);
#else
io_apic_write(apic, 0x11 + pin*2, dest);
#endif
reg = io_apic_read(apic, 0x10 + pin*2);
reg &= ~IO_APIC_REDIR_VECTOR_MASK;
reg |= vector;
@@ -588,10 +592,12 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
if (assign_irq_vector(irq, cfg, mask))
return BAD_APICID;
cpumask_and(desc->affinity, cfg->domain, mask);
/* check this before desc->affinity gets updated */
set_extra_move_desc(desc, mask);
return apic->cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
cpumask_copy(desc->affinity, mask);
return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
}
static void
@@ -845,75 +851,106 @@ __setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */
#ifdef CONFIG_INTR_REMAP
/* I/O APIC RTE contents at the OS boot up */
static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
/*
* Saves and masks all the unmasked IO-APIC RTE's
*/
int save_mask_IO_APIC_setup(void)
struct IO_APIC_route_entry **alloc_ioapic_entries(void)
{
union IO_APIC_reg_01 reg_01;
unsigned long flags;
int apic, pin;
int apic;
struct IO_APIC_route_entry **ioapic_entries;
/*
* The number of IO-APIC IRQ registers (== #pins):
*/
for (apic = 0; apic < nr_ioapics; apic++) {
spin_lock_irqsave(&ioapic_lock, flags);
reg_01.raw = io_apic_read(apic, 1);
spin_unlock_irqrestore(&ioapic_lock, flags);
nr_ioapic_registers[apic] = reg_01.bits.entries+1;
}
ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
GFP_ATOMIC);
if (!ioapic_entries)
return 0;
for (apic = 0; apic < nr_ioapics; apic++) {
early_ioapic_entries[apic] =
ioapic_entries[apic] =
kzalloc(sizeof(struct IO_APIC_route_entry) *
nr_ioapic_registers[apic], GFP_KERNEL);
if (!early_ioapic_entries[apic])
nr_ioapic_registers[apic], GFP_ATOMIC);
if (!ioapic_entries[apic])
goto nomem;
}
for (apic = 0; apic < nr_ioapics; apic++)
return ioapic_entries;
nomem:
while (--apic >= 0)
kfree(ioapic_entries[apic]);
kfree(ioapic_entries);
return 0;
}
/*
* Saves all the IO-APIC RTE's
*/
int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
int apic, pin;
if (!ioapic_entries)
return -ENOMEM;
for (apic = 0; apic < nr_ioapics; apic++) {
if (!ioapic_entries[apic])
return -ENOMEM;
for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
ioapic_entries[apic][pin] =
ioapic_read_entry(apic, pin);
}
return 0;
}
/*
* Mask all IO APIC entries.
*/
void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
int apic, pin;
if (!ioapic_entries)
return;
for (apic = 0; apic < nr_ioapics; apic++) {
if (!ioapic_entries[apic])
break;
for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
struct IO_APIC_route_entry entry;
entry = early_ioapic_entries[apic][pin] =
ioapic_read_entry(apic, pin);
entry = ioapic_entries[apic][pin];
if (!entry.mask) {
entry.mask = 1;
ioapic_write_entry(apic, pin, entry);
}
}
return 0;
nomem:
while (apic >= 0)
kfree(early_ioapic_entries[apic--]);
memset(early_ioapic_entries, 0,
ARRAY_SIZE(early_ioapic_entries));
return -ENOMEM;
}
void restore_IO_APIC_setup(void)
{
int apic, pin;
for (apic = 0; apic < nr_ioapics; apic++) {
if (!early_ioapic_entries[apic])
break;
for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
ioapic_write_entry(apic, pin,
early_ioapic_entries[apic][pin]);
kfree(early_ioapic_entries[apic]);
early_ioapic_entries[apic] = NULL;
}
}
void reinit_intr_remapped_IO_APIC(int intr_remapping)
/*
* Restore IO APIC entries which was saved in ioapic_entries.
*/
int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
int apic, pin;
if (!ioapic_entries)
return -ENOMEM;
for (apic = 0; apic < nr_ioapics; apic++) {
if (!ioapic_entries[apic])
return -ENOMEM;
for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
ioapic_write_entry(apic, pin,
ioapic_entries[apic][pin]);
}
return 0;
}
void reinit_intr_remapped_IO_APIC(int intr_remapping,
struct IO_APIC_route_entry **ioapic_entries)
{
/*
* for now plain restore of previous settings.
@@ -922,7 +959,17 @@ void reinit_intr_remapped_IO_APIC(int intr_remapping)
* table entries. for now, do a plain restore, and wait for
* the setup_IO_APIC_irqs() to do proper initialization.
*/
restore_IO_APIC_setup();
restore_IO_APIC_setup(ioapic_entries);
}
void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
{
int apic;
for (apic = 0; apic < nr_ioapics; apic++)
kfree(ioapic_entries[apic]);
kfree(ioapic_entries);
}
#endif
@@ -1411,9 +1458,7 @@ void __setup_vector_irq(int cpu)
}
static struct irq_chip ioapic_chip;
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip;
#endif
#define IOAPIC_AUTO -1
#define IOAPIC_EDGE 0
@@ -1452,7 +1497,6 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t
else
desc->status &= ~IRQ_LEVEL;
#ifdef CONFIG_INTR_REMAP
if (irq_remapped(irq)) {
desc->status |= IRQ_MOVE_PCNTXT;
if (trigger)
@@ -1464,7 +1508,7 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t
handle_edge_irq, "edge");
return;
}
#endif
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL)
set_irq_chip_and_handler_name(irq, &ioapic_chip,
@@ -1478,14 +1522,13 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t
int setup_ioapic_entry(int apic_id, int irq,
struct IO_APIC_route_entry *entry,
unsigned int destination, int trigger,
int polarity, int vector)
int polarity, int vector, int pin)
{
/*
* add it to the IO-APIC irq-routing table:
*/
memset(entry,0,sizeof(*entry));
#ifdef CONFIG_INTR_REMAP
if (intr_remapping_enabled) {
struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
struct irte irte;
@@ -1504,7 +1547,14 @@ int setup_ioapic_entry(int apic_id, int irq,
irte.present = 1;
irte.dst_mode = apic->irq_dest_mode;
irte.trigger_mode = trigger;
/*
* Trigger mode in the IRTE will always be edge, and the
* actual level or edge trigger will be set up in the IO-APIC
* RTE. This helps simplify level triggered irq migration.
* For more details, see the comments above explaining IO-APIC
* irq migration in the presence of interrupt-remapping.
*/
irte.trigger_mode = 0;
irte.dlvry_mode = apic->irq_delivery_mode;
irte.vector = vector;
irte.dest_id = IRTE_DEST(destination);
@@ -1515,18 +1565,21 @@ int setup_ioapic_entry(int apic_id, int irq,
ir_entry->zero = 0;
ir_entry->format = 1;
ir_entry->index = (index & 0x7fff);
} else
#endif
{
/*
* IO-APIC RTE will be configured with virtual vector.
* irq handler will do the explicit EOI to the io-apic.
*/
ir_entry->vector = pin;
} else {
entry->delivery_mode = apic->irq_delivery_mode;
entry->dest_mode = apic->irq_dest_mode;
entry->dest = destination;
entry->vector = vector;
}
entry->mask = 0; /* enable IRQ */
entry->trigger = trigger;
entry->polarity = polarity;
entry->vector = vector;
/* Mask level triggered irqs.
* Use IRQ_DELAYED_DISABLE for edge triggered irqs.
@@ -1561,7 +1614,7 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
dest, trigger, polarity, cfg->vector)) {
dest, trigger, polarity, cfg->vector, pin)) {
printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
mp_ioapics[apic_id].apicid, pin);
__clear_irq_vector(irq, cfg);
@@ -1642,10 +1695,8 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
{
struct IO_APIC_route_entry entry;
#ifdef CONFIG_INTR_REMAP
if (intr_remapping_enabled)
return;
#endif
memset(&entry, 0, sizeof(entry));
@@ -2040,8 +2091,13 @@ void disable_IO_APIC(void)
* If the i8259 is routed through an IOAPIC
* Put that IOAPIC in virtual wire mode
* so legacy interrupts can be delivered.
*
* With interrupt-remapping, for now we will use virtual wire A mode,
* as virtual wire B is a little more complex (both the IOAPIC RTE
* as well as the interrupt-remapping table entry need to be configured).
* As this gets called during crash dump, keep this simple for now.
*/
if (ioapic_i8259.pin != -1) {
if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
struct IO_APIC_route_entry entry;
memset(&entry, 0, sizeof(entry));
@@ -2061,7 +2117,10 @@ void disable_IO_APIC(void)
ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
}
disconnect_bsp_APIC(ioapic_i8259.pin != -1);
/*
* Use virtual wire A mode when interrupt remapping is enabled.
*/
disconnect_bsp_APIC(!intr_remapping_enabled && ioapic_i8259.pin != -1);
}
#ifdef CONFIG_X86_32
@@ -2303,37 +2362,24 @@ static int ioapic_retrigger_irq(unsigned int irq)
#ifdef CONFIG_SMP
#ifdef CONFIG_INTR_REMAP
static void ir_irq_migration(struct work_struct *work);
static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
/*
* Migrate the IO-APIC irq in the presence of intr-remapping.
*
* For edge triggered, irq migration is a simple atomic update(of vector
* and cpu destination) of IRTE and flush the hardware cache.
* For both level and edge triggered, irq migration is a simple atomic
* update (of vector and cpu destination) of the IRTE plus a flush of the hardware cache.
*
* For level triggered, we need to modify the io-apic RTE as well with the
* updated vector information, along with modifying the IRTE with vector and destination.
* So irq migration for level triggered is a little bit more complex compared to
* edge triggered migration. But the good news is, we use the same algorithm
* for level triggered migration as we have today, only difference being,
* we now initiate the irq migration from process context instead of the
* interrupt context.
*
* In future, when we do a directed EOI (combined with cpu EOI broadcast
* suppression) to the IO-APIC, level triggered irq migration will also be
* as simple as edge triggered migration and we can do the irq migration
* with a simple atomic update to IO-APIC RTE.
* For level triggered, we eliminate the io-apic RTE modification (with the
* updated vector information) by using a virtual vector (the io-apic pin number).
* The real vector used to interrupt the cpu comes from
* the interrupt-remapping table entry.
*/
static void
migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
struct irq_cfg *cfg;
struct irte irte;
int modify_ioapic_rte;
unsigned int dest;
unsigned long flags;
unsigned int irq;
if (!cpumask_intersects(mask, cpu_online_mask))
@@ -2351,13 +2397,6 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
modify_ioapic_rte = desc->status & IRQ_LEVEL;
if (modify_ioapic_rte) {
spin_lock_irqsave(&ioapic_lock, flags);
__target_IO_APIC_irq(irq, dest, cfg);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
irte.vector = cfg->vector;
irte.dest_id = IRTE_DEST(dest);
@@ -2372,73 +2411,12 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
cpumask_copy(desc->affinity, mask);
}
static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
{
int ret = -1;
struct irq_cfg *cfg = desc->chip_data;
mask_IO_APIC_irq_desc(desc);
if (io_apic_level_ack_pending(cfg)) {
/*
* Interrupt in progress. Migrating irq now will change the
* vector information in the IO-APIC RTE and that will confuse
* the EOI broadcast performed by cpu.
* So, delay the irq migration to the next instance.
*/
schedule_delayed_work(&ir_migration_work, 1);
goto unmask;
}
/* everything is clear. we have right of way */
migrate_ioapic_irq_desc(desc, desc->pending_mask);
ret = 0;
desc->status &= ~IRQ_MOVE_PENDING;
cpumask_clear(desc->pending_mask);
unmask:
unmask_IO_APIC_irq_desc(desc);
return ret;
}
static void ir_irq_migration(struct work_struct *work)
{
unsigned int irq;
struct irq_desc *desc;
for_each_irq_desc(irq, desc) {
if (desc->status & IRQ_MOVE_PENDING) {
unsigned long flags;
spin_lock_irqsave(&desc->lock, flags);
if (!desc->chip->set_affinity ||
!(desc->status & IRQ_MOVE_PENDING)) {
desc->status &= ~IRQ_MOVE_PENDING;
spin_unlock_irqrestore(&desc->lock, flags);
continue;
}
desc->chip->set_affinity(irq, desc->pending_mask);
spin_unlock_irqrestore(&desc->lock, flags);
}
}
}
/*
* Migrates the IRQ destination in the process context.
*/
static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
const struct cpumask *mask)
{
if (desc->status & IRQ_LEVEL) {
desc->status |= IRQ_MOVE_PENDING;
cpumask_copy(desc->pending_mask, mask);
migrate_irq_remapped_level_desc(desc);
return;
}
migrate_ioapic_irq_desc(desc, mask);
}
static void set_ir_ioapic_affinity_irq(unsigned int irq,
@@ -2448,6 +2426,11 @@ static void set_ir_ioapic_affinity_irq(unsigned int irq,
set_ir_ioapic_affinity_irq_desc(desc, mask);
}
#else
static inline void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
const struct cpumask *mask)
{
}
#endif
asmlinkage void smp_irq_move_cleanup_interrupt(void)
@@ -2461,6 +2444,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
me = smp_processor_id();
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
unsigned int irq;
unsigned int irr;
struct irq_desc *desc;
struct irq_cfg *cfg;
irq = __get_cpu_var(vector_irq)[vector];
@@ -2480,6 +2464,18 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
goto unlock;
irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
/*
* Check if the vector that needs to be cleaned up is
* registered in the cpu's IRR. If so, then this is not
* the best time to clean it up. Let's clean it up in the
* next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
* to myself.
*/
if (irr & (1 << (vector % 32))) {
apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
goto unlock;
}
__get_cpu_var(vector_irq)[vector] = -1;
cfg->move_cleanup_count--;
unlock:
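
The IRR test added in this hunk indexes the local APIC's 256-bit Interrupt Request Register, which is laid out as eight 32-bit registers at a 0x10 byte stride. As a standalone sketch (hypothetical helper name):

/* True if @vector is currently pending in the local APIC's IRR. */
static bool vector_in_irr_sketch(unsigned int vector)
{
	u32 irr = apic_read(APIC_IRR + (vector / 32) * 0x10);

	return irr & (1U << (vector % 32));
}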
@@ -2528,17 +2524,51 @@ static void irq_complete_move(struct irq_desc **descp)
static inline void irq_complete_move(struct irq_desc **descp) {}
#endif
#ifdef CONFIG_INTR_REMAP
#ifdef CONFIG_X86_X2APIC
static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
int apic, pin;
struct irq_pin_list *entry;
entry = cfg->irq_2_pin;
for (;;) {
if (!entry)
break;
apic = entry->apic;
pin = entry->pin;
io_apic_eoi(apic, pin);
entry = entry->next;
}
}
static void
eoi_ioapic_irq(struct irq_desc *desc)
{
struct irq_cfg *cfg;
unsigned long flags;
unsigned int irq;
irq = desc->irq;
cfg = desc->chip_data;
spin_lock_irqsave(&ioapic_lock, flags);
__eoi_ioapic_irq(irq, cfg);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void ack_x2apic_level(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
ack_x2APIC_irq();
eoi_ioapic_irq(desc);
}
static void ack_x2apic_edge(unsigned int irq)
{
ack_x2APIC_irq();
}
#endif
static void ack_apic_edge(unsigned int irq)
@@ -2649,6 +2679,26 @@ static void ack_apic_level(unsigned int irq)
#endif
}
#ifdef CONFIG_INTR_REMAP
static void ir_ack_apic_edge(unsigned int irq)
{
#ifdef CONFIG_X86_X2APIC
if (x2apic_enabled())
return ack_x2apic_edge(irq);
#endif
return ack_apic_edge(irq);
}
static void ir_ack_apic_level(unsigned int irq)
{
#ifdef CONFIG_X86_X2APIC
if (x2apic_enabled())
return ack_x2apic_level(irq);
#endif
return ack_apic_level(irq);
}
#endif /* CONFIG_INTR_REMAP */
static struct irq_chip ioapic_chip __read_mostly = {
.name = "IO-APIC",
.startup = startup_ioapic_irq,
@@ -2662,20 +2712,20 @@ static struct irq_chip ioapic_chip __read_mostly = {
.retrigger = ioapic_retrigger_irq,
};
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip __read_mostly = {
.name = "IR-IO-APIC",
.startup = startup_ioapic_irq,
.mask = mask_IO_APIC_irq,
.unmask = unmask_IO_APIC_irq,
.ack = ack_x2apic_edge,
.eoi = ack_x2apic_level,
#ifdef CONFIG_INTR_REMAP
.ack = ir_ack_apic_edge,
.eoi = ir_ack_apic_level,
#ifdef CONFIG_SMP
.set_affinity = set_ir_ioapic_affinity_irq,
#endif
#endif
.retrigger = ioapic_retrigger_irq,
};
#endif
static inline void init_IO_APIC_traps(void)
{
@@ -2901,10 +2951,8 @@ static inline void __init check_timer(void)
* 8259A.
*/
if (pin1 == -1) {
#ifdef CONFIG_INTR_REMAP
if (intr_remapping_enabled)
panic("BIOS bug: timer not connected to IO-APIC");
#endif
pin1 = pin2;
apic1 = apic2;
no_pin1 = 1;
@@ -2940,10 +2988,8 @@ static inline void __init check_timer(void)
clear_IO_APIC_pin(0, pin1);
goto out;
}
#ifdef CONFIG_INTR_REMAP
if (intr_remapping_enabled)
panic("timer doesn't work through Interrupt-remapped IO-APIC");
#endif
local_irq_disable();
clear_IO_APIC_pin(apic1, pin1);
if (!no_pin1)
@@ -3237,9 +3283,7 @@ void destroy_irq(unsigned int irq)
if (desc)
desc->chip_data = cfg;
#ifdef CONFIG_INTR_REMAP
free_irte(irq);
#endif
spin_lock_irqsave(&vector_lock, flags);
__clear_irq_vector(irq, cfg);
spin_unlock_irqrestore(&vector_lock, flags);
@@ -3265,7 +3309,6 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
#ifdef CONFIG_INTR_REMAP
if (irq_remapped(irq)) {
struct irte irte;
int ir_index;
@@ -3291,10 +3334,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
MSI_ADDR_IR_SHV |
MSI_ADDR_IR_INDEX1(ir_index) |
MSI_ADDR_IR_INDEX2(ir_index);
} else
#endif
{
msg->address_hi = MSI_ADDR_BASE_HI;
} else {
if (x2apic_enabled())
msg->address_hi = MSI_ADDR_BASE_HI |
MSI_ADDR_EXT_DEST_ID(dest);
else
msg->address_hi = MSI_ADDR_BASE_HI;
msg->address_lo =
MSI_ADDR_BASE_LO |
((apic->irq_dest_mode == 0) ?
@@ -3394,14 +3440,15 @@ static struct irq_chip msi_chip = {
.retrigger = ioapic_retrigger_irq,
};
#ifdef CONFIG_INTR_REMAP
static struct irq_chip msi_ir_chip = {
.name = "IR-PCI-MSI",
.unmask = unmask_msi_irq,
.mask = mask_msi_irq,
.ack = ack_x2apic_edge,
#ifdef CONFIG_INTR_REMAP
.ack = ir_ack_apic_edge,
#ifdef CONFIG_SMP
.set_affinity = ir_set_msi_irq_affinity,
#endif
#endif
.retrigger = ioapic_retrigger_irq,
};
@@ -3432,7 +3479,6 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
}
return index;
}
#endif
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
{
@@ -3446,7 +3492,6 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
set_irq_msi(irq, msidesc);
write_msi_msg(irq, &msg);
#ifdef CONFIG_INTR_REMAP
if (irq_remapped(irq)) {
struct irq_desc *desc = irq_to_desc(irq);
/*
@@ -3455,7 +3500,6 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
desc->status |= IRQ_MOVE_PCNTXT;
set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
} else
#endif
set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
@@ -3469,11 +3513,12 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
int ret, sub_handle;
struct msi_desc *msidesc;
unsigned int irq_want;
#ifdef CONFIG_INTR_REMAP
struct intel_iommu *iommu = 0;
struct intel_iommu *iommu = NULL;
int index = 0;
#endif
/* x86 doesn't support multiple MSI yet */
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
irq_want = nr_irqs_gsi;
sub_handle = 0;
@@ -3482,7 +3527,6 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (irq == 0)
return -1;
irq_want = irq + 1;
#ifdef CONFIG_INTR_REMAP
if (!intr_remapping_enabled)
goto no_ir;
@@ -3510,7 +3554,6 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
set_irte_irq(irq, iommu, index, sub_handle);
}
no_ir:
#endif
ret = setup_msi_irq(dev, msidesc, irq);
if (ret < 0)
goto error;
@@ -3528,7 +3571,7 @@ void arch_teardown_msi_irq(unsigned int irq)
destroy_irq(irq);
}
#ifdef CONFIG_DMAR
#if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
#ifdef CONFIG_SMP
static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
{
@@ -3609,7 +3652,7 @@ static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
#endif /* CONFIG_SMP */
struct irq_chip hpet_msi_type = {
static struct irq_chip hpet_msi_type = {
.name = "HPET_MSI",
.unmask = hpet_msi_unmask,
.mask = hpet_msi_mask,
@@ -4045,11 +4088,9 @@ void __init setup_ioapic_dest(void)
else
mask = apic->target_cpus();
#ifdef CONFIG_INTR_REMAP
if (intr_remapping_enabled)
set_ir_ioapic_affinity_irq_desc(desc, mask);
else
#endif
set_ioapic_affinity_irq_desc(desc, mask);
}
@@ -4142,9 +4183,12 @@ static int __init ioapic_insert_resources(void)
struct resource *r = ioapic_resources;
if (!r) {
printk(KERN_ERR
"IO APIC resources could be not be allocated.\n");
return -1;
if (nr_ioapics > 0) {
printk(KERN_ERR
"IO APIC resources couldn't be allocated.\n");
return -1;
}
return 0;
}
for (i = 0; i < nr_ioapics; i++) {
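
One detail of the msi_compose_msg() hunk further up is worth spelling out: a 32-bit x2APIC ID no longer fits the 8-bit destination field in the low MSI address word, so the upper ID bits travel in address_hi. A hedged sketch of the composition, assuming the usual msidef.h encodings:

/* Sketch: build the MSI address words for destination @dest. */
static void compose_msi_addr_sketch(struct msi_msg *msg, unsigned int dest)
{
	if (x2apic_enabled())
		/* bits 8..31 of the APIC ID ride in the high dword */
		msg->address_hi = MSI_ADDR_BASE_HI |
				  MSI_ADDR_EXT_DEST_ID(dest);
	else
		msg->address_hi = MSI_ADDR_BASE_HI;

	/* the low 8 ID bits use the classic destination field */
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(dest);
}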

View file

@@ -39,7 +39,7 @@
int unknown_nmi_panic;
int nmi_watchdog_enabled;
static cpumask_t backtrace_mask = CPU_MASK_NONE;
static cpumask_var_t backtrace_mask;
/* nmi_active:
* >0: the lapic NMI watchdog is active, but can be disabled
@@ -138,6 +138,7 @@ int __init check_nmi_watchdog(void)
if (!prev_nmi_count)
goto error;
alloc_cpumask_var(&backtrace_mask, GFP_KERNEL);
printk(KERN_INFO "Testing NMI watchdog ... ");
#ifdef CONFIG_SMP
@@ -413,14 +414,14 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
touched = 1;
}
if (cpu_isset(cpu, backtrace_mask)) {
if (cpumask_test_cpu(cpu, backtrace_mask)) {
static DEFINE_SPINLOCK(lock); /* Serialise the printks */
spin_lock(&lock);
printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
dump_stack();
spin_unlock(&lock);
cpu_clear(cpu, backtrace_mask);
cpumask_clear_cpu(cpu, backtrace_mask);
}
/* Could check oops_in_progress here too, but it's safer not to */
@@ -554,10 +555,10 @@ void __trigger_all_cpu_backtrace(void)
{
int i;
backtrace_mask = cpu_online_map;
cpumask_copy(backtrace_mask, cpu_online_mask);
/* Wait for up to 10 seconds for all CPUs to do the backtrace */
for (i = 0; i < 10 * 1000; i++) {
if (cpus_empty(backtrace_mask))
if (cpumask_empty(backtrace_mask))
break;
mdelay(1);
}
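
The whole change in this file is the static cpumask_t to cpumask_var_t conversion: with CONFIG_CPUMASK_OFFSTACK the NR_CPUS-sized bitmap is no longer embedded in the image but allocated once at init. A minimal sketch of the pattern, including the allocation-failure check the hunk above skips:

static cpumask_var_t backtrace_mask;	/* was: static cpumask_t backtrace_mask */

static int __init backtrace_mask_init_sketch(void)
{
	/* can only fail when CONFIG_CPUMASK_OFFSTACK=y */
	if (!alloc_cpumask_var(&backtrace_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(backtrace_mask, cpu_online_mask);	/* was: mask = cpu_online_map */
	return 0;
}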

View file

@@ -334,9 +334,9 @@ static inline void numaq_smp_callin_clear_local_apic(void)
clear_local_APIC();
}
static inline const cpumask_t *numaq_target_cpus(void)
static inline const struct cpumask *numaq_target_cpus(void)
{
return &CPU_MASK_ALL;
return cpu_all_mask;
}
static inline unsigned long
@@ -427,7 +427,7 @@ static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid)
* We use physical apicids here, not logical, so just return the default
* physical broadcast to stop people from breaking us
*/
static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask)
static unsigned int numaq_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
return 0x0F;
}
@@ -462,7 +462,7 @@ static int probe_numaq(void)
return found_numaq;
}
static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
static void numaq_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
/* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest
@@ -472,7 +472,8 @@ static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
* deliver interrupts to the wrong hyperthread when only one
* hyperthread was specified in the interrupt destination.
*/
*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
cpumask_clear(retmask);
cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}
static void numaq_setup_portio_remap(void)

View file

@@ -83,7 +83,8 @@ static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
* deliver interrupts to the wrong hyperthread when only one
* hyperthread was specified in the interrupt destination.
*/
*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
cpumask_clear(retmask);
cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}
/* should be called last. */

View file

@@ -68,6 +68,13 @@ void __init default_setup_apic_routing(void)
apic = &apic_physflat;
printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
}
/*
* Now that apic routing model is selected, configure the
* fault handling for intr remapping.
*/
if (intr_remapping_enabled)
enable_drhd_fault_handling();
}
/* Same for both flat and physical. */

View file

@@ -53,23 +53,19 @@ static unsigned summit_get_apic_id(unsigned long x)
return (x >> 24) & 0xFF;
}
static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
static inline void summit_send_IPI_mask(const struct cpumask *mask, int vector)
{
default_send_IPI_mask_sequence_logical(mask, vector);
}
static void summit_send_IPI_allbutself(int vector)
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
if (!cpus_empty(mask))
summit_send_IPI_mask(&mask, vector);
default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
}
static void summit_send_IPI_all(int vector)
{
summit_send_IPI_mask(&cpu_online_map, vector);
summit_send_IPI_mask(cpu_online_mask, vector);
}
#include <asm/tsc.h>
@@ -186,13 +182,13 @@ static inline int is_WPEG(struct rio_detail *rio){
#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
static const cpumask_t *summit_target_cpus(void)
static const struct cpumask *summit_target_cpus(void)
{
/* CPU_MASK_ALL (0xff) has undefined behaviour with
* dest_LowestPrio mode logical clustered apic interrupt routing
* Just start on cpu 0. IRQ balancing will spread load
*/
return &cpumask_of_cpu(0);
return cpumask_of(0);
}
static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
@@ -289,7 +285,7 @@ static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
return 1;
}
static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
unsigned int round = 0;
int cpu, apicid = 0;
@@ -346,7 +342,7 @@ static int probe_summit(void)
return 0;
}
static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
static void summit_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
/* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest
@@ -356,7 +352,8 @@ static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
* deliver interrupts to the wrong hyperthread when only one
* hyperthread was specified in the interrupt destination.
*/
*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
cpumask_clear(retmask);
cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}
#ifdef CONFIG_X86_SUMMIT_NUMA

View file

@@ -57,6 +57,8 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
unsigned long query_cpu;
unsigned long flags;
x2apic_wrmsr_fence();
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
__x2apic_send_IPI_dest(
@@ -73,6 +75,8 @@ static void
unsigned long query_cpu;
unsigned long flags;
x2apic_wrmsr_fence();
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
if (query_cpu == this_cpu)
@@ -90,6 +94,8 @@ static void x2apic_send_IPI_allbutself(int vector)
unsigned long query_cpu;
unsigned long flags;
x2apic_wrmsr_fence();
local_irq_save(flags);
for_each_online_cpu(query_cpu) {
if (query_cpu == this_cpu)

View file

@@ -58,6 +58,8 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
unsigned long query_cpu;
unsigned long flags;
x2apic_wrmsr_fence();
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
@@ -73,6 +75,8 @@ static void
unsigned long query_cpu;
unsigned long flags;
x2apic_wrmsr_fence();
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
if (query_cpu != this_cpu)
@@ -89,6 +93,8 @@ static void x2apic_send_IPI_allbutself(int vector)
unsigned long query_cpu;
unsigned long flags;
x2apic_wrmsr_fence();
local_irq_save(flags);
for_each_online_cpu(query_cpu) {
if (query_cpu == this_cpu)
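
Both x2apic drivers gain the same two lines in every IPI path. x2apic_wrmsr_fence() is assumed to be a bare mfence: unlike the xAPIC's uncached MMIO write to the ICR, the WRMSR that sends an x2APIC IPI is not a serializing store, so earlier memory writes have to be fenced before the IPI can be observed by the target:

/* Presumed body of the fence called above. */
static inline void x2apic_wrmsr_fence_sketch(void)
{
	asm volatile("mfence" : : : "memory");
}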

View file

@@ -118,17 +118,12 @@ static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
static void uv_send_IPI_one(int cpu, int vector)
{
unsigned long val, apicid;
unsigned long apicid;
int pnode;
apicid = per_cpu(x86_cpu_to_apicid, cpu);
pnode = uv_apicid_to_pnode(apicid);
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
(apicid << UVH_IPI_INT_APIC_ID_SHFT) |
(vector << UVH_IPI_INT_VECTOR_SHFT);
uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
uv_hub_send_ipi(pnode, apicid, vector);
}
static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
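
The removed lines show exactly what the new uv_hub_send_ipi() helper is assumed to encapsulate: composing the UV IPI MMR value and writing it to the target pnode.

/* Sketch of the presumed helper body, reconstructed from the removed code. */
static void uv_hub_send_ipi_sketch(int pnode, unsigned long apicid, int vector)
{
	unsigned long val;

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      ((unsigned long)vector << UVH_IPI_INT_VECTOR_SHFT);

	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}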

View file

@@ -466,7 +466,7 @@ static const lookup_t error_table[] = {
* @err: APM BIOS return code
*
* Write a meaningful log entry to the kernel log in the event of
* an APM error.
* an APM error. Note that this also handles (negative) kernel errors.
*/
static void apm_error(char *str, int err)
@@ -478,42 +478,13 @@ static void apm_error(char *str, int err)
break;
if (i < ERROR_COUNT)
printk(KERN_NOTICE "apm: %s: %s\n", str, error_table[i].msg);
else if (err < 0)
printk(KERN_NOTICE "apm: %s: linux error code %i\n", str, err);
else
printk(KERN_NOTICE "apm: %s: unknown error code %#2.2x\n",
str, err);
}
/*
* Lock APM functionality to physical CPU 0
*/
#ifdef CONFIG_SMP
static cpumask_t apm_save_cpus(void)
{
cpumask_t x = current->cpus_allowed;
/* Some bioses don't like being called from CPU != 0 */
set_cpus_allowed(current, cpumask_of_cpu(0));
BUG_ON(smp_processor_id() != 0);
return x;
}
static inline void apm_restore_cpus(cpumask_t mask)
{
set_cpus_allowed(current, mask);
}
#else
/*
* No CPU lockdown needed on a uniprocessor
*/
#define apm_save_cpus() (current->cpus_allowed)
#define apm_restore_cpus(x) (void)(x)
#endif
/*
* These are the actual BIOS calls. Depending on APM_ZERO_SEGS and
* apm_info.allow_ints, we are being really paranoid here! Not only
@@ -568,16 +539,23 @@ static inline void apm_irq_restore(unsigned long flags)
# define APM_DO_RESTORE_SEGS
#endif
struct apm_bios_call {
u32 func;
/* In and out */
u32 ebx;
u32 ecx;
/* Out only */
u32 eax;
u32 edx;
u32 esi;
/* Error: -ENOMEM, or bits 8-15 of eax */
int err;
};
/**
* apm_bios_call - Make an APM BIOS 32bit call
* @func: APM function to execute
* @ebx_in: EBX register for call entry
* @ecx_in: ECX register for call entry
* @eax: EAX register return
* @ebx: EBX register return
* @ecx: ECX register return
* @edx: EDX register return
* @esi: ESI register return
* __apm_bios_call - Make an APM BIOS 32bit call
* @_call: pointer to struct apm_bios_call.
*
* Make an APM call using the 32bit protected mode interface. The
* caller is responsible for knowing if APM BIOS is configured and
@@ -586,35 +564,109 @@ static inline void apm_irq_restore(unsigned long flags)
* flag is loaded into AL. If there is an error, then the error
* code is returned in AH (bits 8-15 of eax) and this function
* returns non-zero.
*
* Note: this makes the call on the current CPU.
*/
static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, u32 *esi)
static long __apm_bios_call(void *_call)
{
APM_DECL_SEGS
unsigned long flags;
cpumask_t cpus;
int cpu;
struct desc_struct save_desc_40;
struct desc_struct *gdt;
cpus = apm_save_cpus();
struct apm_bios_call *call = _call;
cpu = get_cpu();
BUG_ON(cpu != 0);
gdt = get_cpu_gdt_table(cpu);
save_desc_40 = gdt[0x40 / 8];
gdt[0x40 / 8] = bad_bios_desc;
apm_irq_save(flags);
APM_DO_SAVE_SEGS;
apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
apm_bios_call_asm(call->func, call->ebx, call->ecx,
&call->eax, &call->ebx, &call->ecx, &call->edx,
&call->esi);
APM_DO_RESTORE_SEGS;
apm_irq_restore(flags);
gdt[0x40 / 8] = save_desc_40;
put_cpu();
apm_restore_cpus(cpus);
return *eax & 0xff;
return call->eax & 0xff;
}
/* Run __apm_bios_call or __apm_bios_call_simple on CPU 0 */
static int on_cpu0(long (*fn)(void *), struct apm_bios_call *call)
{
int ret;
/* Don't bother with work_on_cpu in the common case, so we don't
* have to worry about OOM or overhead. */
if (get_cpu() == 0) {
ret = fn(call);
put_cpu();
} else {
put_cpu();
ret = work_on_cpu(0, fn, call);
}
/* work_on_cpu can fail with -ENOMEM */
if (ret < 0)
call->err = ret;
else
call->err = (call->eax >> 8) & 0xff;
return ret;
}
/**
* apm_bios_call - Make an APM BIOS 32bit call (on CPU 0)
* @call: the apm_bios_call registers.
*
* If there is an error, it is returned in @call.err.
*/
static int apm_bios_call(struct apm_bios_call *call)
{
return on_cpu0(__apm_bios_call, call);
}
/**
* __apm_bios_call_simple - Make an APM BIOS 32bit call (on CPU 0)
* @_call: pointer to struct apm_bios_call.
*
* Make a BIOS call that returns one value only, or just status.
* If there is an error, then the error code is returned in AH
* (bits 8-15 of eax) and this function returns non-zero (it can
* also return -ENOMEM). This is used for simpler BIOS operations.
* This call may hold interrupts off for a long time on some laptops.
*
* Note: this makes the call on the current CPU.
*/
static long __apm_bios_call_simple(void *_call)
{
u8 error;
APM_DECL_SEGS
unsigned long flags;
int cpu;
struct desc_struct save_desc_40;
struct desc_struct *gdt;
struct apm_bios_call *call = _call;
cpu = get_cpu();
BUG_ON(cpu != 0);
gdt = get_cpu_gdt_table(cpu);
save_desc_40 = gdt[0x40 / 8];
gdt[0x40 / 8] = bad_bios_desc;
apm_irq_save(flags);
APM_DO_SAVE_SEGS;
error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
&call->eax);
APM_DO_RESTORE_SEGS;
apm_irq_restore(flags);
gdt[0x40 / 8] = save_desc_40;
put_cpu();
return error;
}
/**
@@ -623,40 +675,28 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
* @ebx_in: EBX register value for BIOS call
* @ecx_in: ECX register value for BIOS call
* @eax: EAX register on return from the BIOS call
* @err: error code (bits 8-15 of eax, or a negative kernel error)
*
* Make a BIOS call that returns one value only, or just status.
* If there is an error, then the error code is returned in AH
* (bits 8-15 of eax) and this function returns non-zero. This is
* used for simpler BIOS operations. This call may hold interrupts
* off for a long time on some laptops.
* If there is an error, then the error code is returned in @err
* and this function returns non-zero. This is used for simpler
* BIOS operations. This call may hold interrupts off for a long
* time on some laptops.
*/
static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
static int apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax,
int *err)
{
u8 error;
APM_DECL_SEGS
unsigned long flags;
cpumask_t cpus;
int cpu;
struct desc_struct save_desc_40;
struct desc_struct *gdt;
struct apm_bios_call call;
int ret;
cpus = apm_save_cpus();
call.func = func;
call.ebx = ebx_in;
call.ecx = ecx_in;
cpu = get_cpu();
gdt = get_cpu_gdt_table(cpu);
save_desc_40 = gdt[0x40 / 8];
gdt[0x40 / 8] = bad_bios_desc;
apm_irq_save(flags);
APM_DO_SAVE_SEGS;
error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
APM_DO_RESTORE_SEGS;
apm_irq_restore(flags);
gdt[0x40 / 8] = save_desc_40;
put_cpu();
apm_restore_cpus(cpus);
return error;
ret = on_cpu0(__apm_bios_call_simple, &call);
*eax = call.eax;
*err = call.err;
return ret;
}
/**
@@ -678,9 +718,10 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
static int apm_driver_version(u_short *val)
{
u32 eax;
int err;
if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax))
return (eax >> 8) & 0xff;
if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax, &err))
return err;
*val = eax;
return APM_SUCCESS;
}
@@ -701,22 +742,21 @@ static int apm_driver_version(u_short *val)
* that APM 1.2 is in use. If no messages are pending, the value 0x80
* is returned (No power management events pending).
*/
static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
{
u32 eax;
u32 ebx;
u32 ecx;
u32 dummy;
struct apm_bios_call call;
if (apm_bios_call(APM_FUNC_GET_EVENT, 0, 0, &eax, &ebx, &ecx,
&dummy, &dummy))
return (eax >> 8) & 0xff;
*event = ebx;
call.func = APM_FUNC_GET_EVENT;
call.ebx = call.ecx = 0;
if (apm_bios_call(&call))
return call.err;
*event = call.ebx;
if (apm_info.connection_version < 0x0102)
*info = ~0; /* indicate info not valid */
else
*info = ecx;
*info = call.ecx;
return APM_SUCCESS;
}
@@ -737,9 +777,10 @@ static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
static int set_power_state(u_short what, u_short state)
{
u32 eax;
int err;
if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax))
return (eax >> 8) & 0xff;
if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax, &err))
return err;
return APM_SUCCESS;
}
@@ -770,6 +811,7 @@ static int apm_do_idle(void)
u8 ret = 0;
int idled = 0;
int polling;
int err;
polling = !!(current_thread_info()->status & TS_POLLING);
if (polling) {
@@ -782,7 +824,7 @@ static int apm_do_idle(void)
}
if (!need_resched()) {
idled = 1;
ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax);
ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax, &err);
}
if (polling)
current_thread_info()->status |= TS_POLLING;
@@ -797,8 +839,7 @@ static int apm_do_idle(void)
* Only report the failure the first 5 times.
*/
if (++t < 5) {
printk(KERN_DEBUG "apm_do_idle failed (%d)\n",
(eax >> 8) & 0xff);
printk(KERN_DEBUG "apm_do_idle failed (%d)\n", err);
t = jiffies;
}
return -1;
@@ -816,9 +857,10 @@ static int apm_do_idle(void)
static void apm_do_busy(void)
{
u32 dummy;
int err;
if (clock_slowed || ALWAYS_CALL_BUSY) {
(void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy);
(void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy, &err);
clock_slowed = 0;
}
}
@@ -937,7 +979,7 @@ static void apm_power_off(void)
/* Some bioses don't like being called from CPU != 0 */
if (apm_info.realmode_power_off) {
(void)apm_save_cpus();
set_cpus_allowed_ptr(current, cpumask_of(0));
machine_real_restart(po_bios_call, sizeof(po_bios_call));
} else {
(void)set_system_power_state(APM_STATE_OFF);
@@ -956,12 +998,13 @@ static void apm_power_off(void)
static int apm_enable_power_management(int enable)
{
u32 eax;
int err;
if ((enable == 0) && (apm_info.bios.flags & APM_BIOS_DISENGAGED))
return APM_NOT_ENGAGED;
if (apm_bios_call_simple(APM_FUNC_ENABLE_PM, APM_DEVICE_BALL,
enable, &eax))
return (eax >> 8) & 0xff;
enable, &eax, &err))
return err;
if (enable)
apm_info.bios.flags &= ~APM_BIOS_DISABLED;
else
@@ -986,24 +1029,23 @@ static int apm_enable_power_management(int enable)
static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
{
u32 eax;
u32 ebx;
u32 ecx;
u32 edx;
u32 dummy;
struct apm_bios_call call;
call.func = APM_FUNC_GET_STATUS;
call.ebx = APM_DEVICE_ALL;
call.ecx = 0;
if (apm_info.get_power_status_broken)
return APM_32_UNSUPPORTED;
if (apm_bios_call(APM_FUNC_GET_STATUS, APM_DEVICE_ALL, 0,
&eax, &ebx, &ecx, &edx, &dummy))
return (eax >> 8) & 0xff;
*status = ebx;
*bat = ecx;
if (apm_bios_call(&call))
return call.err;
*status = call.ebx;
*bat = call.ecx;
if (apm_info.get_power_status_swabinminutes) {
*life = swab16((u16)edx);
*life = swab16((u16)call.edx);
*life |= 0x8000;
} else
*life = edx;
*life = call.edx;
return APM_SUCCESS;
}
@@ -1048,12 +1090,14 @@ static int apm_get_battery_status(u_short which, u_short *status,
static int apm_engage_power_management(u_short device, int enable)
{
u32 eax;
int err;
if ((enable == 0) && (device == APM_DEVICE_ALL)
&& (apm_info.bios.flags & APM_BIOS_DISABLED))
return APM_DISABLED;
if (apm_bios_call_simple(APM_FUNC_ENGAGE_PM, device, enable, &eax))
return (eax >> 8) & 0xff;
if (apm_bios_call_simple(APM_FUNC_ENGAGE_PM, device, enable,
&eax, &err))
return err;
if (device == APM_DEVICE_ALL) {
if (enable)
apm_info.bios.flags &= ~APM_BIOS_DISENGAGED;
@@ -1190,8 +1234,10 @@ static int suspend(int vetoable)
struct apm_user *as;
device_suspend(PMSG_SUSPEND);
local_irq_disable();
device_power_down(PMSG_SUSPEND);
local_irq_disable();
sysdev_suspend(PMSG_SUSPEND);
local_irq_enable();
@@ -1209,9 +1255,12 @@ static int suspend(int vetoable)
if (err != APM_SUCCESS)
apm_error("suspend", err);
err = (err == APM_SUCCESS) ? 0 : -EIO;
sysdev_resume();
device_power_up(PMSG_RESUME);
local_irq_enable();
device_power_up(PMSG_RESUME);
device_resume(PMSG_RESUME);
queue_event(APM_NORMAL_RESUME, NULL);
spin_lock(&user_list_lock);
@@ -1228,8 +1277,9 @@ static void standby(void)
{
int err;
local_irq_disable();
device_power_down(PMSG_SUSPEND);
local_irq_disable();
sysdev_suspend(PMSG_SUSPEND);
local_irq_enable();
@@ -1239,8 +1289,9 @@ static void standby(void)
local_irq_disable();
sysdev_resume();
device_power_up(PMSG_RESUME);
local_irq_enable();
device_power_up(PMSG_RESUME);
}
static apm_event_t get_event(void)
@@ -1682,16 +1733,14 @@ static int apm(void *unused)
char *power_stat;
char *bat_stat;
#ifdef CONFIG_SMP
/* 2002/08/01 - WT
* This is to avoid random crashes at boot time during initialization
* on SMP systems in case of "apm=power-off" mode. Seen on ASUS A7M266D.
* Some bioses don't like being called from CPU != 0.
* Method suggested by Ingo Molnar.
*/
set_cpus_allowed(current, cpumask_of_cpu(0));
set_cpus_allowed_ptr(current, cpumask_of(0));
BUG_ON(smp_processor_id() != 0);
#endif
if (apm_info.connection_version == 0) {
apm_info.connection_version = apm_info.bios.version;


@@ -18,6 +18,7 @@
#include <asm/thread_info.h>
#include <asm/bootparam.h>
#include <asm/elf.h>
#include <asm/suspend.h>
#include <xen/interface/xen.h>


@@ -16,6 +16,7 @@
#include <asm/thread_info.h>
#include <asm/ia32.h>
#include <asm/bootparam.h>
#include <asm/suspend.h>
#include <xen/interface/xen.h>


@@ -83,15 +83,15 @@ void __init setup_bios_corruption_check(void)
u64 size;
addr = find_e820_area_size(addr, &size, PAGE_SIZE);
if (addr == 0)
if (!(addr + 1))
break;
if (addr >= corruption_check_size)
break;
if ((addr + size) > corruption_check_size)
size = corruption_check_size - addr;
if (size == 0)
break;
e820_update_range(addr, size, E820_RAM, E820_RESERVED);
scan_areas[num_scan_areas].addr = addr;
scan_areas[num_scan_areas].size = size;
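
The replaced test works because find_e820_area_size() apparently now signals failure with an all-ones address rather than 0, so the loop must break on ~0ULL; `!(addr + 1)` is a compact spelling of `addr == (u64)-1`. A tiny standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t failed = ~0ULL;	/* assumed failure sentinel */
	uint64_t found  = 0x9f000;	/* an ordinary scan address */

	printf("%d\n", !(failed + 1));	/* 1: -1 wraps to 0, loop breaks */
	printf("%d\n", !(found + 1));	/* 0: keep scanning */
	return 0;
}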


@@ -14,11 +14,12 @@ obj-y += vmware.o hypervisor.o
obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o
obj-$(CONFIG_X86_64) += bugs_64.o
obj-$(CONFIG_X86_CPU_DEBUG) += cpu_debug.o
obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
obj-$(CONFIG_CPU_SUP_CENTAUR_32) += centaur.o
obj-$(CONFIG_CPU_SUP_CENTAUR_64) += centaur_64.o
obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o


@@ -29,7 +29,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
u32 regs[4];
const struct cpuid_bit *cb;
static const struct cpuid_bit cpuid_bits[] = {
static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
{ 0, 0, 0, 0 }
};


@@ -502,7 +502,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int
}
#endif
static struct cpu_dev amd_cpu_dev __cpuinitdata = {
static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
.c_vendor = "AMD",
.c_ident = { "AuthenticAMD" },
#ifdef CONFIG_X86_32


@@ -1,11 +1,11 @@
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include "cpu.h"
@@ -276,7 +276,7 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
*/
c->x86_capability[5] = cpuid_edx(0xC0000001);
}
#ifdef CONFIG_X86_32
/* Cyrix III family needs CX8 & PGE explicitly enabled. */
if (c->x86_model >= 6 && c->x86_model <= 9) {
rdmsr(MSR_VIA_FCR, lo, hi);
@@ -288,6 +288,11 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
/* Before Nehemiah, the C3's had 3dNOW! */
if (c->x86_model >= 6 && c->x86_model < 9)
set_cpu_cap(c, X86_FEATURE_3DNOW);
#endif
if (c->x86 == 0x6 && c->x86_model >= 0xf) {
c->x86_cache_alignment = c->x86_clflush_size * 2;
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
}
display_cacheinfo(c);
}
@@ -316,16 +321,25 @@ enum {
static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
{
switch (c->x86) {
#ifdef CONFIG_X86_32
case 5:
/* Emulate MTRRs using Centaur's MCR. */
set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
break;
#endif
case 6:
if (c->x86_model >= 0xf)
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
break;
}
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#endif
}
static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
char *name;
u32 fcr_set = 0;
u32 fcr_clr = 0;
@@ -337,8 +351,10 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
* 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
*/
clear_cpu_cap(c, 0*32+31);
#endif
early_init_centaur(c);
switch (c->x86) {
#ifdef CONFIG_X86_32
case 5:
switch (c->x86_model) {
case 4:
@@ -442,16 +458,20 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
}
sprintf(c->x86_model_id, "WinChip %s", name);
break;
#endif
case 6:
init_c3(c);
break;
}
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
#endif
}
static unsigned int __cpuinit
centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
#ifdef CONFIG_X86_32
/* VIA C3 CPUs (670-68F) need further shifting. */
if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
size >>= 8;
@@ -464,11 +484,11 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
if ((c->x86 == 6) && (c->x86_model == 9) &&
(c->x86_mask == 1) && (size == 65))
size -= 1;
#endif
return size;
}
static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
.c_vendor = "Centaur",
.c_ident = { "CentaurHauls" },
.c_early_init = early_init_centaur,


@@ -1,37 +0,0 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include "cpu.h"
static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
{
if (c->x86 == 0x6 && c->x86_model >= 0xf)
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_SYSENTER32);
}
static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
{
early_init_centaur(c);
if (c->x86 == 0x6 && c->x86_model >= 0xf) {
c->x86_cache_alignment = c->x86_clflush_size * 2;
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
}
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
}
static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
.c_vendor = "Centaur",
.c_ident = { "CentaurHauls" },
.c_early_init = early_init_centaur,
.c_init = init_centaur,
.c_x86_vendor = X86_VENDOR_CENTAUR,
};
cpu_dev_register(centaur_cpu_dev);


@@ -1,52 +1,50 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kgdb.h>
#include <linux/topology.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/linkage.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <asm/stackprotector.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/pat.h>
#include <asm/asm.h>
#include <asm/numa.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/topology.h>
#include <asm/cpumask.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/mtrr.h>
#include <asm/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/smp.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/hypervisor.h>
#include <asm/stackprotector.h>
#include "cpu.h"
#ifdef CONFIG_X86_64
/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_callin_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;
/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;
@@ -60,17 +58,7 @@ void __init setup_cpu_local_masks(void)
alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}
#else /* CONFIG_X86_32 */
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_initialized;
cpumask_t cpu_sibling_setup_map;
#endif /* CONFIG_X86_32 */
static struct cpu_dev *this_cpu __cpuinitdata;
static const struct cpu_dev *this_cpu __cpuinitdata;
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
@@ -79,48 +67,48 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
* IRET will check the segment types kkeil 2000/10/28
* Also sysret mandates a special GDT layout
*
* The TLS descriptors are currently at a different place compared to i386.
* TLS descriptors are currently at a different place compared to i386.
* Hopefully nobody expects them at a fixed place (Wine?)
*/
[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
#else
[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
/*
* Segments used for calling PnP BIOS have byte granularity.
* Their code and data segments have fixed 64k limits,
* the transfer segment sizes are set at run time.
*/
/* 32-bit code */
[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
/* 16-bit code */
[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
/* 16-bit data */
[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
/* 16-bit data */
[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
/* 16-bit data */
[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
/*
* The APM segments have byte granularity and their bases
* are set at run time. All have 64k limits.
*/
/* 32-bit code */
[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
/* 16-bit code */
[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
/* data */
[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
GDT_STACK_CANARY_INIT
#endif
} };
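
Each initializer above packs a full segment descriptor into two 32-bit words. A standalone decode of one of them ({ 0x0000ffff, 0x00cf9a00 }, the 32-bit kernel code segment) shows what the magic numbers mean:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t a = 0x0000ffff, b = 0x00cf9a00;	/* low dword, high dword */
	uint32_t base  = (a >> 16) | ((b & 0xff) << 16) | (b & 0xff000000);
	uint32_t limit = (a & 0xffff) | (b & 0x000f0000);
	unsigned type  = (b >> 8) & 0xff;	/* 0x9a: present, DPL0, code, readable */
	unsigned flags = (b >> 20) & 0xf;	/* 0xc: 4K granularity, 32-bit */

	/* base=0 limit=0xfffff (4 GB with G=1) type=0x9a flags=0xc */
	printf("base=%#x limit=%#x type=%#x flags=%#x\n",
	       base, limit, type, flags);
	return 0;
}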
@@ -164,16 +152,17 @@ static inline int flag_is_changeable_p(u32 flag)
* the CPUID. Add "volatile" to not allow gcc to
* optimize the subsequent calls to this function.
*/
asm volatile ("pushfl\n\t"
"pushfl\n\t"
"popl %0\n\t"
"movl %0,%1\n\t"
"xorl %2,%0\n\t"
"pushl %0\n\t"
"popfl\n\t"
"pushfl\n\t"
"popl %0\n\t"
"popfl\n\t"
asm volatile ("pushfl \n\t"
"pushfl \n\t"
"popl %0 \n\t"
"movl %0, %1 \n\t"
"xorl %2, %0 \n\t"
"pushl %0 \n\t"
"popfl \n\t"
"pushfl \n\t"
"popl %0 \n\t"
"popfl \n\t"
: "=&r" (f1), "=&r" (f2)
: "ir" (flag));
@@ -188,18 +177,22 @@ static int __cpuinit have_cpuid_p(void)
static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
/* Disable processor serial number */
unsigned long lo, hi;
rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
lo |= 0x200000;
wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
printk(KERN_NOTICE "CPU serial number disabled.\n");
clear_cpu_cap(c, X86_FEATURE_PN);
unsigned long lo, hi;
/* Disabling the serial number may affect the cpuid level */
c->cpuid_level = cpuid_eax(0);
}
if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
return;
/* Disable processor serial number: */
rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
lo |= 0x200000;
wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
printk(KERN_NOTICE "CPU serial number disabled.\n");
clear_cpu_cap(c, X86_FEATURE_PN);
/* Disabling the serial number may affect the cpuid level */
c->cpuid_level = cpuid_eax(0);
}
static int __init x86_serial_nr_setup(char *s)
@@ -232,6 +225,7 @@ struct cpuid_dependent_feature {
u32 feature;
u32 level;
};
static const struct cpuid_dependent_feature __cpuinitconst
cpuid_dependent_features[] = {
{ X86_FEATURE_MWAIT, 0x00000005 },
@@ -243,7 +237,11 @@ cpuid_dependent_features[] = {
static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
const struct cpuid_dependent_feature *df;
for (df = cpuid_dependent_features; df->feature; df++) {
if (!cpu_has(c, df->feature))
continue;
/*
* Note: cpuid_level is set to -1 if unavailable, but
* extended_cpuid_level is set to 0 if unavailable
@@ -251,32 +249,32 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
* when signed; hence the weird messing around with
* signs here...
*/
if (cpu_has(c, df->feature) &&
((s32)df->level < 0 ?
if (!((s32)df->level < 0 ?
(u32)df->level > (u32)c->extended_cpuid_level :
(s32)df->level > (s32)c->cpuid_level)) {
clear_cpu_cap(c, df->feature);
if (warn)
printk(KERN_WARNING
"CPU: CPU feature %s disabled "
"due to lack of CPUID level 0x%x\n",
x86_cap_flags[df->feature],
df->level);
}
(s32)df->level > (s32)c->cpuid_level))
continue;
clear_cpu_cap(c, df->feature);
if (!warn)
continue;
printk(KERN_WARNING
"CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
x86_cap_flags[df->feature], df->level);
}
}
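
The sign trick the comment describes can be seen in isolation: basic CPUID levels are small positive numbers and compare signed, while extended levels (0x80000000 and up) are negative as s32 and therefore compare unsigned. A standalone rendering of the predicate; the feature levels used are illustrative:

#include <stdio.h>
#include <stdint.h>

/* Returns 1 when the required level exceeds what the CPU reports,
 * i.e. when the feature must be cleared. */
static int level_missing(int32_t required, int32_t basic_level,
			 uint32_t extended_level)
{
	return required < 0 ?
		(uint32_t)required > extended_level :	/* extended range */
		required > basic_level;			/* basic range    */
}

int main(void)
{
	/* MWAIT needs basic level 0x5, but the CPU only reports level 1: */
	printf("%d\n", level_missing(0x00000005, 0x1, 0));	/* 1: clear */
	/* an extended feature needing 0x80000001 on a CPU with 0x80000008: */
	printf("%d\n", level_missing((int32_t)0x80000001,
				     0x1, 0x80000008));		/* 0: keep  */
	return 0;
}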
/*
* Naming convention should be: <Name> [(<Codename>)]
* This table is only used if init_<vendor>() below doesn't set it;
* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
*
* in particular, if CPUID levels 0x80000002..4 are supported, this
* isn't used
*/
/* Look up CPU names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
{
struct cpu_model_info *info;
const struct cpu_model_info *info;
if (c->x86_model >= 16)
return NULL; /* Range check */
@@ -307,8 +305,10 @@ void load_percpu_segment(int cpu)
load_stack_canary_segment();
}
/* Current gdt points %fs at the "master" per-cpu area: after this,
* it's on the real one. */
/*
* Current gdt points %fs at the "master" per-cpu area: after this,
* it's on the real one.
*/
void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;
@@ -321,7 +321,7 @@ void switch_to_new_gdt(int cpu)
load_percpu_segment(cpu);
}
static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
@@ -340,7 +340,7 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
#endif
}
static struct cpu_dev __cpuinitdata default_cpu = {
static const struct cpu_dev __cpuinitconst default_cpu = {
.c_init = default_init,
.c_vendor = "Unknown",
.c_x86_vendor = X86_VENDOR_UNKNOWN,
@@ -354,22 +354,24 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
if (c->extended_cpuid_level < 0x80000004)
return;
v = (unsigned int *) c->x86_model_id;
v = (unsigned int *)c->x86_model_id;
cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
c->x86_model_id[48] = 0;
/* Intel chips right-justify this string for some dumb reason;
undo that brain damage */
/*
* Intel chips right-justify this string for some dumb reason;
* undo that brain damage:
*/
p = q = &c->x86_model_id[0];
while (*p == ' ')
p++;
p++;
if (p != q) {
while (*p)
*q++ = *p++;
while (q <= &c->x86_model_id[48])
*q++ = '\0'; /* Zero-pad the rest */
while (*p)
*q++ = *p++;
while (q <= &c->x86_model_id[48])
*q++ = '\0'; /* Zero-pad the rest */
}
}
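
The loop above shifts the brand string left over its leading blanks and zero-pads the tail. The same logic as a standalone program, fed a typical right-justified string of the kind Intel returns:

#include <stdio.h>

int main(void)
{
	char id[49] = "          Intel(R) Xeon(TM) CPU 2.40GHz";
	char *p = id, *q = id;

	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;		/* shift left over the blanks */
		while (q <= &id[48])
			*q++ = '\0';		/* zero-pad the rest */
	}
	printf("'%s'\n", id);	/* 'Intel(R) Xeon(TM) CPU 2.40GHz' */
	return 0;
}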
@@ -438,28 +440,31 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
if (smp_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
} else if (smp_num_siblings > 1) {
if (smp_num_siblings > nr_cpu_ids) {
printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
smp_num_siblings);
smp_num_siblings = 1;
return;
}
index_msb = get_count_order(smp_num_siblings);
c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
smp_num_siblings = smp_num_siblings / c->x86_max_cores;
index_msb = get_count_order(smp_num_siblings);
core_bits = get_count_order(c->x86_max_cores);
c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
((1 << core_bits) - 1);
goto out;
}
if (smp_num_siblings <= 1)
goto out;
if (smp_num_siblings > nr_cpu_ids) {
pr_warning("CPU: Unsupported number of siblings %d",
smp_num_siblings);
smp_num_siblings = 1;
return;
}
index_msb = get_count_order(smp_num_siblings);
c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
smp_num_siblings = smp_num_siblings / c->x86_max_cores;
index_msb = get_count_order(smp_num_siblings);
core_bits = get_count_order(c->x86_max_cores);
c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
((1 << core_bits) - 1);
out:
if ((c->x86_max_cores * smp_num_siblings) > 1) {
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
@@ -473,8 +478,8 @@ out:
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
char *v = c->x86_vendor_id;
int i;
static int printed;
int i;
for (i = 0; i < X86_VENDOR_NUM; i++) {
if (!cpu_devs[i])
@@ -483,6 +488,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
(cpu_devs[i]->c_ident[1] &&
!strcmp(v, cpu_devs[i]->c_ident[1]))) {
this_cpu = cpu_devs[i];
c->x86_vendor = this_cpu->c_x86_vendor;
return;
@@ -491,7 +497,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
if (!printed) {
printed++;
printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v);
printk(KERN_ERR
"CPU: vendor_id '%s' unknown, using generic init.\n", v);
printk(KERN_ERR "CPU: Your system may be unstable.\n");
}
@@ -511,14 +519,17 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
/* Intel-defined flags: level 0x00000001 */
if (c->cpuid_level >= 0x00000001) {
u32 junk, tfms, cap0, misc;
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
c->x86 = (tfms >> 8) & 0xf;
c->x86_model = (tfms >> 4) & 0xf;
c->x86_mask = tfms & 0xf;
if (c->x86 == 0xf)
c->x86 += (tfms >> 20) & 0xff;
if (c->x86 >= 0x6)
c->x86_model += ((tfms >> 16) & 0xf) << 4;
if (cap0 & (1<<19)) {
c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
c->x86_cache_alignment = c->x86_clflush_size;
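
The family/model arithmetic above extends the 4-bit CPUID fields with the extended-family and extended-model bits. A standalone decode of a sample signature (0x0001067a, a plausible family-6 model-0x17 part) traces the steps:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tfms = 0x0001067a;		/* eax of CPUID(1) */
	unsigned family   = (tfms >> 8) & 0xf;
	unsigned model    = (tfms >> 4) & 0xf;
	unsigned stepping = tfms & 0xf;

	if (family == 0xf)			/* extended family kicks in */
		family += (tfms >> 20) & 0xff;
	if (family >= 0x6)			/* extended model kicks in */
		model += ((tfms >> 16) & 0xf) << 4;

	/* family 0x6 model 0x17 stepping 0xa */
	printf("family %#x model %#x stepping %#x\n",
	       family, model, stepping);
	return 0;
}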
@@ -534,6 +545,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
/* Intel-defined flags: level 0x00000001 */
if (c->cpuid_level >= 0x00000001) {
u32 capability, excap;
cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
c->x86_capability[0] = capability;
c->x86_capability[4] = excap;
@@ -542,6 +554,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
/* AMD-defined flags: level 0x80000001 */
xlvl = cpuid_eax(0x80000000);
c->extended_cpuid_level = xlvl;
if ((xlvl & 0xffff0000) == 0x80000000) {
if (xlvl >= 0x80000001) {
c->x86_capability[1] = cpuid_edx(0x80000001);
@@ -549,13 +562,15 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
}
}
#ifdef CONFIG_X86_64
if (c->extended_cpuid_level >= 0x80000008) {
u32 eax = cpuid_eax(0x80000008);
c->x86_virt_bits = (eax >> 8) & 0xff;
c->x86_phys_bits = eax & 0xff;
}
#ifdef CONFIG_X86_32
else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
c->x86_phys_bits = 36;
#endif
if (c->extended_cpuid_level >= 0x80000007)
@@ -602,8 +617,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
c->x86_clflush_size = 64;
c->x86_phys_bits = 36;
c->x86_virt_bits = 48;
#else
c->x86_clflush_size = 32;
c->x86_phys_bits = 32;
c->x86_virt_bits = 32;
#endif
c->x86_cache_alignment = c->x86_clflush_size;
@@ -634,12 +653,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
void __init early_cpu_init(void)
{
struct cpu_dev **cdev;
const struct cpu_dev *const *cdev;
int count = 0;
printk("KERNEL supported cpus:\n");
printk(KERN_INFO "KERNEL supported cpus:\n");
for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
struct cpu_dev *cpudev = *cdev;
const struct cpu_dev *cpudev = *cdev;
unsigned int j;
if (count >= X86_VENDOR_NUM)
@@ -650,7 +669,7 @@ void __init early_cpu_init(void)
for (j = 0; j < 2; j++) {
if (!cpudev->c_ident[j])
continue;
printk(" %s %s\n", cpudev->c_vendor,
printk(KERN_INFO " %s %s\n", cpudev->c_vendor,
cpudev->c_ident[j]);
}
}
@@ -726,9 +745,13 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
c->x86_clflush_size = 64;
c->x86_phys_bits = 36;
c->x86_virt_bits = 48;
#else
c->cpuid_level = -1; /* CPUID not detected */
c->x86_clflush_size = 32;
c->x86_phys_bits = 32;
c->x86_virt_bits = 32;
#endif
c->x86_cache_alignment = c->x86_clflush_size;
memset(&c->x86_capability, 0, sizeof c->x86_capability);
@@ -759,8 +782,8 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
squash_the_stupid_serial_number(c);
/*
* The vendor-specific functions might have changed features. Now
* we do "generic changes."
* The vendor-specific functions might have changed features.
* Now we do "generic changes."
*/
/* Filter out anything that depends on CPUID levels we don't have */
@@ -768,7 +791,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
/* If the model name is still unset, do table lookup. */
if (!c->x86_model_id[0]) {
char *p;
const char *p;
p = table_lookup_model(c);
if (p)
strcpy(c->x86_model_id, p);
@@ -824,6 +847,7 @@ static void vgetcpu_set_mode(void)
void __init identify_boot_cpu(void)
{
identify_cpu(&boot_cpu_data);
init_c1e_mask();
#ifdef CONFIG_X86_32
sysenter_setup();
enable_sep_cpu();
@@ -843,11 +867,11 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
}
struct msr_range {
unsigned min;
unsigned max;
unsigned min;
unsigned max;
};
static struct msr_range msr_range_array[] __cpuinitdata = {
static const struct msr_range msr_range_array[] __cpuinitconst = {
{ 0x00000000, 0x00000418},
{ 0xc0000000, 0xc000040b},
{ 0xc0010000, 0xc0010142},
@@ -856,14 +880,15 @@ static struct msr_range msr_range_array[] __cpuinitdata = {
static void __cpuinit print_cpu_msr(void)
{
unsigned index_min, index_max;
unsigned index;
u64 val;
int i;
unsigned index_min, index_max;
for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
index_min = msr_range_array[i].min;
index_max = msr_range_array[i].max;
for (index = index_min; index < index_max; index++) {
if (rdmsrl_amd_safe(index, &val))
continue;
@@ -873,6 +898,7 @@ static void __cpuinit print_cpu_msr(void)
}
static int show_msr __cpuinitdata;
static __init int setup_show_msr(char *arg)
{
int num;
@@ -894,12 +920,14 @@ __setup("noclflush", setup_noclflush);
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
char *vendor = NULL;
const char *vendor = NULL;
if (c->x86_vendor < X86_VENDOR_NUM)
if (c->x86_vendor < X86_VENDOR_NUM) {
vendor = this_cpu->c_vendor;
else if (c->cpuid_level >= 0)
vendor = c->x86_vendor_id;
} else {
if (c->cpuid_level >= 0)
vendor = c->x86_vendor_id;
}
if (vendor && !strstr(c->x86_model_id, vendor))
printk(KERN_CONT "%s ", vendor);
@@ -926,10 +954,12 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
static __init int setup_disablecpuid(char *arg)
{
int bit;
if (get_option(&arg, &bit) && bit < NCAPINTS*32)
setup_clear_cpu_cap(bit);
else
return 0;
return 1;
}
__setup("clearcpuid=", setup_disablecpuid);
@@ -939,6 +969,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE);
DEFINE_PER_CPU(char *, irq_stack_ptr) =
init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
@@ -948,12 +979,21 @@ EXPORT_PER_CPU_SYMBOL(kernel_stack);
DEFINE_PER_CPU(unsigned int, irq_count) = -1;
/*
* Special IST stacks which the CPU switches to when it calls
* an IST-marked descriptor entry. Up to 7 stacks (hardware
* limit), all of them are 4K, except the debug stack which
* is 8K.
*/
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
[DEBUG_STACK - 1] = DEBUG_STKSZ
};
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
__aligned(PAGE_SIZE);
extern asmlinkage void ignore_sysret(void);
/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
@@ -983,7 +1023,7 @@ unsigned long kernel_eflags;
*/
DEFINE_PER_CPU(struct orig_ist, orig_ist);
#else /* x86_64 */
#else /* CONFIG_X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU(unsigned long, stack_canary);
@@ -995,9 +1035,26 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
memset(regs, 0, sizeof(struct pt_regs));
regs->fs = __KERNEL_PERCPU;
regs->gs = __KERNEL_STACK_CANARY;
return regs;
}
#endif /* x86_64 */
#endif /* CONFIG_X86_64 */
/*
* Clear all 6 debug registers:
*/
static void clear_all_debug_regs(void)
{
int i;
for (i = 0; i < 8; i++) {
/* Ignore db4, db5 */
if ((i == 4) || (i == 5))
continue;
set_debugreg(0, i);
}
}
/*
* cpu_init() initializes state that is per-CPU. Some data is already
@@ -1007,15 +1064,20 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
* A lot of state is already set up in PDA init for 64 bit
*/
#ifdef CONFIG_X86_64
void __cpuinit cpu_init(void)
{
int cpu = stack_smp_processor_id();
struct tss_struct *t = &per_cpu(init_tss, cpu);
struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
unsigned long v;
struct orig_ist *orig_ist;
struct task_struct *me;
struct tss_struct *t;
unsigned long v;
int cpu;
int i;
cpu = stack_smp_processor_id();
t = &per_cpu(init_tss, cpu);
orig_ist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
if (cpu != 0 && percpu_read(node_number) == 0 &&
cpu_to_node(cpu) != NUMA_NO_NODE)
@@ -1056,19 +1118,17 @@ void __cpuinit cpu_init(void)
* set up and load the per-CPU TSS
*/
if (!orig_ist->ist[0]) {
static const unsigned int sizes[N_EXCEPTION_STACKS] = {
[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
[DEBUG_STACK - 1] = DEBUG_STKSZ
};
char *estacks = per_cpu(exception_stacks, cpu);
for (v = 0; v < N_EXCEPTION_STACKS; v++) {
estacks += sizes[v];
estacks += exception_stack_sizes[v];
orig_ist->ist[v] = t->x86_tss.ist[v] =
(unsigned long)estacks;
}
}
t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
/*
* <= is required because the CPU will access up to
* 8 bits beyond the end of the IO permission bitmap.
@@ -1078,8 +1138,7 @@ void __cpuinit cpu_init(void)
atomic_inc(&init_mm.mm_count);
me->active_mm = &init_mm;
if (me->mm)
BUG();
BUG_ON(me->mm);
enter_lazy_tlb(&init_mm, me);
load_sp0(t, &current->thread);
@@ -1098,17 +1157,7 @@ void __cpuinit cpu_init(void)
arch_kgdb_ops.correct_hw_break();
else
#endif
{
/*
* Clear all 6 debug registers:
*/
set_debugreg(0UL, 0);
set_debugreg(0UL, 1);
set_debugreg(0UL, 2);
set_debugreg(0UL, 3);
set_debugreg(0UL, 6);
set_debugreg(0UL, 7);
}
clear_all_debug_regs();
fpu_init();
@@ -1129,7 +1178,8 @@ void __cpuinit cpu_init(void)
if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
for (;;) local_irq_enable();
for (;;)
local_irq_enable();
}
printk(KERN_INFO "Initializing CPU#%d\n", cpu);
@@ -1145,8 +1195,7 @@ void __cpuinit cpu_init(void)
*/
atomic_inc(&init_mm.mm_count);
curr->active_mm = &init_mm;
if (curr->mm)
BUG();
BUG_ON(curr->mm);
enter_lazy_tlb(&init_mm, curr);
load_sp0(t, thread);
@@ -1159,13 +1208,7 @@ void __cpuinit cpu_init(void)
__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif
/* Clear all 6 debug registers: */
set_debugreg(0, 0);
set_debugreg(0, 1);
set_debugreg(0, 2);
set_debugreg(0, 3);
set_debugreg(0, 6);
set_debugreg(0, 7);
clear_all_debug_regs();
/*
* Force FPU initialization:
@@ -1185,6 +1228,4 @@ void __cpuinit cpu_init(void)
xsave_init();
}
#endif


@@ -3,33 +3,34 @@
#define ARCH_X86_CPU_H
struct cpu_model_info {
int vendor;
int family;
char *model_names[16];
int vendor;
int family;
const char *model_names[16];
};
/* attempt to consolidate cpu attributes */
struct cpu_dev {
char * c_vendor;
const char *c_vendor;
/* some have two possibilities for cpuid string */
char * c_ident[2];
const char *c_ident[2];
struct cpu_model_info c_models[4];
void (*c_early_init)(struct cpuinfo_x86 *c);
void (*c_init)(struct cpuinfo_x86 * c);
void (*c_identify)(struct cpuinfo_x86 * c);
unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
int c_x86_vendor;
void (*c_early_init)(struct cpuinfo_x86 *);
void (*c_init)(struct cpuinfo_x86 *);
void (*c_identify)(struct cpuinfo_x86 *);
unsigned int (*c_size_cache)(struct cpuinfo_x86 *, unsigned int);
int c_x86_vendor;
};
#define cpu_dev_register(cpu_devX) \
static struct cpu_dev *__cpu_dev_##cpu_devX __used \
static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
__attribute__((__section__(".x86_cpu_dev.init"))) = \
&cpu_devX;
extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[];
extern const struct cpu_dev *const __x86_cpu_dev_start[],
*const __x86_cpu_dev_end[];
extern void display_cacheinfo(struct cpuinfo_x86 *c);

arch/x86/kernel/cpu/cpu_debug.c (new executable file, 901 lines)

@@ -0,0 +1,901 @@
/*
* CPU x86 architecture debug code
*
* Copyright(C) 2009 Jaswinder Singh Rajput
*
* For licensing details see kernel-base/COPYING
*/
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <asm/cpu_debug.h>
#include <asm/paravirt.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/desc.h>
static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
static DEFINE_PER_CPU(unsigned, cpu_modelflag);
static DEFINE_PER_CPU(int, cpu_priv_count);
static DEFINE_PER_CPU(unsigned, cpu_model);
static DEFINE_MUTEX(cpu_debug_lock);
static struct dentry *cpu_debugfs_dir;
static struct cpu_debug_base cpu_base[] = {
{ "mc", CPU_MC, 0 },
{ "monitor", CPU_MONITOR, 0 },
{ "time", CPU_TIME, 0 },
{ "pmc", CPU_PMC, 1 },
{ "platform", CPU_PLATFORM, 0 },
{ "apic", CPU_APIC, 0 },
{ "poweron", CPU_POWERON, 0 },
{ "control", CPU_CONTROL, 0 },
{ "features", CPU_FEATURES, 0 },
{ "lastbranch", CPU_LBRANCH, 0 },
{ "bios", CPU_BIOS, 0 },
{ "freq", CPU_FREQ, 0 },
{ "mtrr", CPU_MTRR, 0 },
{ "perf", CPU_PERF, 0 },
{ "cache", CPU_CACHE, 0 },
{ "sysenter", CPU_SYSENTER, 0 },
{ "therm", CPU_THERM, 0 },
{ "misc", CPU_MISC, 0 },
{ "debug", CPU_DEBUG, 0 },
{ "pat", CPU_PAT, 0 },
{ "vmx", CPU_VMX, 0 },
{ "call", CPU_CALL, 0 },
{ "base", CPU_BASE, 0 },
{ "ver", CPU_VER, 0 },
{ "conf", CPU_CONF, 0 },
{ "smm", CPU_SMM, 0 },
{ "svm", CPU_SVM, 0 },
{ "osvm", CPU_OSVM, 0 },
{ "tss", CPU_TSS, 0 },
{ "cr", CPU_CR, 0 },
{ "dt", CPU_DT, 0 },
{ "registers", CPU_REG_ALL, 0 },
};
static struct cpu_file_base cpu_file[] = {
{ "index", CPU_REG_ALL, 0 },
{ "value", CPU_REG_ALL, 1 },
};
/* Intel Registers Range */
static struct cpu_debug_range cpu_intel_range[] = {
{ 0x00000000, 0x00000001, CPU_MC, CPU_INTEL_ALL },
{ 0x00000006, 0x00000007, CPU_MONITOR, CPU_CX_AT_XE },
{ 0x00000010, 0x00000010, CPU_TIME, CPU_INTEL_ALL },
{ 0x00000011, 0x00000013, CPU_PMC, CPU_INTEL_PENTIUM },
{ 0x00000017, 0x00000017, CPU_PLATFORM, CPU_PX_CX_AT_XE },
{ 0x0000001B, 0x0000001B, CPU_APIC, CPU_P6_CX_AT_XE },
{ 0x0000002A, 0x0000002A, CPU_POWERON, CPU_PX_CX_AT_XE },
{ 0x0000002B, 0x0000002B, CPU_POWERON, CPU_INTEL_XEON },
{ 0x0000002C, 0x0000002C, CPU_FREQ, CPU_INTEL_XEON },
{ 0x0000003A, 0x0000003A, CPU_CONTROL, CPU_CX_AT_XE },
{ 0x00000040, 0x00000043, CPU_LBRANCH, CPU_PM_CX_AT_XE },
{ 0x00000044, 0x00000047, CPU_LBRANCH, CPU_PM_CO_AT },
{ 0x00000060, 0x00000063, CPU_LBRANCH, CPU_C2_AT },
{ 0x00000064, 0x00000067, CPU_LBRANCH, CPU_INTEL_ATOM },
{ 0x00000079, 0x00000079, CPU_BIOS, CPU_P6_CX_AT_XE },
{ 0x00000088, 0x0000008A, CPU_CACHE, CPU_INTEL_P6 },
{ 0x0000008B, 0x0000008B, CPU_BIOS, CPU_P6_CX_AT_XE },
{ 0x0000009B, 0x0000009B, CPU_MONITOR, CPU_INTEL_XEON },
{ 0x000000C1, 0x000000C2, CPU_PMC, CPU_P6_CX_AT },
{ 0x000000CD, 0x000000CD, CPU_FREQ, CPU_CX_AT },
{ 0x000000E7, 0x000000E8, CPU_PERF, CPU_CX_AT },
{ 0x000000FE, 0x000000FE, CPU_MTRR, CPU_P6_CX_XE },
{ 0x00000116, 0x00000116, CPU_CACHE, CPU_INTEL_P6 },
{ 0x00000118, 0x00000118, CPU_CACHE, CPU_INTEL_P6 },
{ 0x00000119, 0x00000119, CPU_CACHE, CPU_INTEL_PX },
{ 0x0000011A, 0x0000011B, CPU_CACHE, CPU_INTEL_P6 },
{ 0x0000011E, 0x0000011E, CPU_CACHE, CPU_PX_CX_AT },
{ 0x00000174, 0x00000176, CPU_SYSENTER, CPU_P6_CX_AT_XE },
{ 0x00000179, 0x0000017A, CPU_MC, CPU_PX_CX_AT_XE },
{ 0x0000017B, 0x0000017B, CPU_MC, CPU_P6_XE },
{ 0x00000186, 0x00000187, CPU_PMC, CPU_P6_CX_AT },
{ 0x00000198, 0x00000199, CPU_PERF, CPU_PM_CX_AT_XE },
{ 0x0000019A, 0x0000019A, CPU_TIME, CPU_PM_CX_AT_XE },
{ 0x0000019B, 0x0000019D, CPU_THERM, CPU_PM_CX_AT_XE },
{ 0x000001A0, 0x000001A0, CPU_MISC, CPU_PM_CX_AT_XE },
{ 0x000001C9, 0x000001C9, CPU_LBRANCH, CPU_PM_CX_AT },
{ 0x000001D7, 0x000001D8, CPU_LBRANCH, CPU_INTEL_XEON },
{ 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_CX_AT_XE },
{ 0x000001DA, 0x000001DA, CPU_LBRANCH, CPU_INTEL_XEON },
{ 0x000001DB, 0x000001DB, CPU_LBRANCH, CPU_P6_XE },
{ 0x000001DC, 0x000001DC, CPU_LBRANCH, CPU_INTEL_P6 },
{ 0x000001DD, 0x000001DE, CPU_LBRANCH, CPU_PX_CX_AT_XE },
{ 0x000001E0, 0x000001E0, CPU_LBRANCH, CPU_INTEL_P6 },
{ 0x00000200, 0x0000020F, CPU_MTRR, CPU_P6_CX_XE },
{ 0x00000250, 0x00000250, CPU_MTRR, CPU_P6_CX_XE },
{ 0x00000258, 0x00000259, CPU_MTRR, CPU_P6_CX_XE },
{ 0x00000268, 0x0000026F, CPU_MTRR, CPU_P6_CX_XE },
{ 0x00000277, 0x00000277, CPU_PAT, CPU_C2_AT_XE },
{ 0x000002FF, 0x000002FF, CPU_MTRR, CPU_P6_CX_XE },
{ 0x00000300, 0x00000308, CPU_PMC, CPU_INTEL_XEON },
{ 0x00000309, 0x0000030B, CPU_PMC, CPU_C2_AT_XE },
{ 0x0000030C, 0x00000311, CPU_PMC, CPU_INTEL_XEON },
{ 0x00000345, 0x00000345, CPU_PMC, CPU_C2_AT },
{ 0x00000360, 0x00000371, CPU_PMC, CPU_INTEL_XEON },
{ 0x0000038D, 0x00000390, CPU_PMC, CPU_C2_AT },
{ 0x000003A0, 0x000003BE, CPU_PMC, CPU_INTEL_XEON },
{ 0x000003C0, 0x000003CD, CPU_PMC, CPU_INTEL_XEON },
{ 0x000003E0, 0x000003E1, CPU_PMC, CPU_INTEL_XEON },
{ 0x000003F0, 0x000003F0, CPU_PMC, CPU_INTEL_XEON },
{ 0x000003F1, 0x000003F1, CPU_PMC, CPU_C2_AT_XE },
{ 0x000003F2, 0x000003F2, CPU_PMC, CPU_INTEL_XEON },
{ 0x00000400, 0x00000402, CPU_MC, CPU_PM_CX_AT_XE },
{ 0x00000403, 0x00000403, CPU_MC, CPU_INTEL_XEON },
{ 0x00000404, 0x00000406, CPU_MC, CPU_PM_CX_AT_XE },
{ 0x00000407, 0x00000407, CPU_MC, CPU_INTEL_XEON },
{ 0x00000408, 0x0000040A, CPU_MC, CPU_PM_CX_AT_XE },
{ 0x0000040B, 0x0000040B, CPU_MC, CPU_INTEL_XEON },
{ 0x0000040C, 0x0000040E, CPU_MC, CPU_PM_CX_XE },
{ 0x0000040F, 0x0000040F, CPU_MC, CPU_INTEL_XEON },
{ 0x00000410, 0x00000412, CPU_MC, CPU_PM_CX_AT_XE },
{ 0x00000413, 0x00000417, CPU_MC, CPU_CX_AT_XE },
{ 0x00000480, 0x0000048B, CPU_VMX, CPU_CX_AT_XE },
{ 0x00000600, 0x00000600, CPU_DEBUG, CPU_PM_CX_AT_XE },
{ 0x00000680, 0x0000068F, CPU_LBRANCH, CPU_INTEL_XEON },
{ 0x000006C0, 0x000006CF, CPU_LBRANCH, CPU_INTEL_XEON },
{ 0x000107CC, 0x000107D3, CPU_PMC, CPU_INTEL_XEON_MP },
{ 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_INTEL_XEON },
{ 0xC0000081, 0xC0000082, CPU_CALL, CPU_INTEL_XEON },
{ 0xC0000084, 0xC0000084, CPU_CALL, CPU_INTEL_XEON },
{ 0xC0000100, 0xC0000102, CPU_BASE, CPU_INTEL_XEON },
};
/* AMD Registers Range */
static struct cpu_debug_range cpu_amd_range[] = {
{ 0x00000000, 0x00000001, CPU_MC, CPU_K10_PLUS, },
{ 0x00000010, 0x00000010, CPU_TIME, CPU_K8_PLUS, },
{ 0x0000001B, 0x0000001B, CPU_APIC, CPU_K8_PLUS, },
{ 0x0000002A, 0x0000002A, CPU_POWERON, CPU_K7_PLUS },
{ 0x0000008B, 0x0000008B, CPU_VER, CPU_K8_PLUS },
{ 0x000000FE, 0x000000FE, CPU_MTRR, CPU_K8_PLUS, },
{ 0x00000174, 0x00000176, CPU_SYSENTER, CPU_K8_PLUS, },
{ 0x00000179, 0x0000017B, CPU_MC, CPU_K8_PLUS, },
{ 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_K8_PLUS, },
{ 0x000001DB, 0x000001DE, CPU_LBRANCH, CPU_K8_PLUS, },
{ 0x00000200, 0x0000020F, CPU_MTRR, CPU_K8_PLUS, },
{ 0x00000250, 0x00000250, CPU_MTRR, CPU_K8_PLUS, },
{ 0x00000258, 0x00000259, CPU_MTRR, CPU_K8_PLUS, },
{ 0x00000268, 0x0000026F, CPU_MTRR, CPU_K8_PLUS, },
{ 0x00000277, 0x00000277, CPU_PAT, CPU_K8_PLUS, },
{ 0x000002FF, 0x000002FF, CPU_MTRR, CPU_K8_PLUS, },
{ 0x00000400, 0x00000413, CPU_MC, CPU_K8_PLUS, },
{ 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_AMD_ALL, },
{ 0xC0000081, 0xC0000084, CPU_CALL, CPU_K8_PLUS, },
{ 0xC0000100, 0xC0000102, CPU_BASE, CPU_K8_PLUS, },
{ 0xC0000103, 0xC0000103, CPU_TIME, CPU_K10_PLUS, },
{ 0xC0010000, 0xC0010007, CPU_PMC, CPU_K8_PLUS, },
{ 0xC0010010, 0xC0010010, CPU_CONF, CPU_K7_PLUS, },
{ 0xC0010015, 0xC0010015, CPU_CONF, CPU_K7_PLUS, },
{ 0xC0010016, 0xC001001A, CPU_MTRR, CPU_K8_PLUS, },
{ 0xC001001D, 0xC001001D, CPU_MTRR, CPU_K8_PLUS, },
{ 0xC001001F, 0xC001001F, CPU_CONF, CPU_K8_PLUS, },
{ 0xC0010030, 0xC0010035, CPU_BIOS, CPU_K8_PLUS, },
{ 0xC0010044, 0xC0010048, CPU_MC, CPU_K8_PLUS, },
{ 0xC0010050, 0xC0010056, CPU_SMM, CPU_K0F_PLUS, },
{ 0xC0010058, 0xC0010058, CPU_CONF, CPU_K10_PLUS, },
{ 0xC0010060, 0xC0010060, CPU_CACHE, CPU_AMD_11, },
{ 0xC0010061, 0xC0010068, CPU_SMM, CPU_K10_PLUS, },
{ 0xC0010069, 0xC001006B, CPU_SMM, CPU_AMD_11, },
{ 0xC0010070, 0xC0010071, CPU_SMM, CPU_K10_PLUS, },
{ 0xC0010111, 0xC0010113, CPU_SMM, CPU_K8_PLUS, },
{ 0xC0010114, 0xC0010118, CPU_SVM, CPU_K10_PLUS, },
{ 0xC0010140, 0xC0010141, CPU_OSVM, CPU_K10_PLUS, },
{ 0xC0011022, 0xC0011023, CPU_CONF, CPU_K10_PLUS, },
};
/* Intel */
static int get_intel_modelflag(unsigned model)
{
int flag;
switch (model) {
case 0x0501:
case 0x0502:
case 0x0504:
flag = CPU_INTEL_PENTIUM;
break;
case 0x0601:
case 0x0603:
case 0x0605:
case 0x0607:
case 0x0608:
case 0x060A:
case 0x060B:
flag = CPU_INTEL_P6;
break;
case 0x0609:
case 0x060D:
flag = CPU_INTEL_PENTIUM_M;
break;
case 0x060E:
flag = CPU_INTEL_CORE;
break;
case 0x060F:
case 0x0617:
flag = CPU_INTEL_CORE2;
break;
case 0x061C:
flag = CPU_INTEL_ATOM;
break;
case 0x0F00:
case 0x0F01:
case 0x0F02:
case 0x0F03:
case 0x0F04:
flag = CPU_INTEL_XEON_P4;
break;
case 0x0F06:
flag = CPU_INTEL_XEON_MP;
break;
default:
flag = CPU_NONE;
break;
}
return flag;
}
/* AMD */
static int get_amd_modelflag(unsigned model)
{
int flag;
switch (model >> 8) {
case 0x6:
flag = CPU_AMD_K6;
break;
case 0x7:
flag = CPU_AMD_K7;
break;
case 0x8:
flag = CPU_AMD_K8;
break;
case 0xf:
flag = CPU_AMD_0F;
break;
case 0x10:
flag = CPU_AMD_10;
break;
case 0x11:
flag = CPU_AMD_11;
break;
default:
flag = CPU_NONE;
break;
}
return flag;
}
static int get_cpu_modelflag(unsigned cpu)
{
int flag;
flag = per_cpu(cpu_model, cpu);
switch (flag >> 16) {
case X86_VENDOR_INTEL:
flag = get_intel_modelflag(flag);
break;
case X86_VENDOR_AMD:
flag = get_amd_modelflag(flag & 0xffff);
break;
default:
flag = CPU_NONE;
break;
}
return flag;
}
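
cpu_model is a packed triple: the vendor lives in the top half and the family/model pair in the bottom 16 bits, which is why get_cpu_modelflag() switches on flag >> 16 and get_amd_modelflag() on model >> 8. A standalone illustration; the X86_VENDOR_* values are assumed from <asm/processor.h>:

#include <stdio.h>

#define X86_VENDOR_AMD 2	/* assumed kernel value */

int main(void)
{
	unsigned vendor = X86_VENDOR_AMD, family = 0x10, model = 0x02;
	unsigned packed = (vendor << 16) | (family << 8) | model;

	printf("vendor %u family %#x\n",
	       packed >> 16,		/* selects the vendor switch     */
	       (packed & 0xffff) >> 8);	/* what get_amd_modelflag() sees */
	return 0;
}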
static int get_cpu_range_count(unsigned cpu)
{
int index;
switch (per_cpu(cpu_model, cpu) >> 16) {
case X86_VENDOR_INTEL:
index = ARRAY_SIZE(cpu_intel_range);
break;
case X86_VENDOR_AMD:
index = ARRAY_SIZE(cpu_amd_range);
break;
default:
index = 0;
break;
}
return index;
}
static int is_typeflag_valid(unsigned cpu, unsigned flag)
{
unsigned vendor, modelflag;
int i, index;
/* Standard registers should always be valid */
if (flag >= CPU_TSS)
return 1;
modelflag = per_cpu(cpu_modelflag, cpu);
vendor = per_cpu(cpu_model, cpu) >> 16;
index = get_cpu_range_count(cpu);
for (i = 0; i < index; i++) {
switch (vendor) {
case X86_VENDOR_INTEL:
if ((cpu_intel_range[i].model & modelflag) &&
(cpu_intel_range[i].flag & flag))
return 1;
break;
case X86_VENDOR_AMD:
if ((cpu_amd_range[i].model & modelflag) &&
(cpu_amd_range[i].flag & flag))
return 1;
break;
}
}
/* Invalid */
return 0;
}
static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
int index, unsigned flag)
{
unsigned modelflag;
modelflag = per_cpu(cpu_modelflag, cpu);
*max = 0;
switch (per_cpu(cpu_model, cpu) >> 16) {
case X86_VENDOR_INTEL:
if ((cpu_intel_range[index].model & modelflag) &&
(cpu_intel_range[index].flag & flag)) {
*min = cpu_intel_range[index].min;
*max = cpu_intel_range[index].max;
}
break;
case X86_VENDOR_AMD:
if ((cpu_amd_range[index].model & modelflag) &&
(cpu_amd_range[index].flag & flag)) {
*min = cpu_amd_range[index].min;
*max = cpu_amd_range[index].max;
}
break;
}
return *max;
}
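
Both lookups above treat model and flag as bitmasks: a table row applies only when its model mask intersects the CPU's modelflag and its flag mask intersects the requested register class. A standalone sketch with illustrative mask values (not the kernel's):

#include <stdio.h>

#define CPU_PMC           0x0004	/* illustrative */
#define CPU_INTEL_PENTIUM 0x0001	/* illustrative */

struct range { unsigned min, max, flag, model; };

int main(void)
{
	struct range r = { 0x11, 0x13, CPU_PMC, CPU_INTEL_PENTIUM };
	unsigned modelflag = CPU_INTEL_PENTIUM, flag = CPU_PMC;

	if ((r.model & modelflag) && (r.flag & flag))
		printf("MSRs %#x..%#x selected\n", r.min, r.max);
	return 0;
}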
/* This function can also be called with seq = NULL for printk */
static void print_cpu_data(struct seq_file *seq, unsigned type,
u32 low, u32 high)
{
struct cpu_private *priv;
u64 val = high;
if (seq) {
priv = seq->private;
if (priv->file) {
val = (val << 32) | low;
seq_printf(seq, "0x%llx\n", val);
} else
seq_printf(seq, " %08x: %08x_%08x\n",
type, high, low);
} else
printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
}
/* This function can also be called with seq = NULL for printk */
static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
{
unsigned msr, msr_min, msr_max;
struct cpu_private *priv;
u32 low, high;
int i, range;
if (seq) {
priv = seq->private;
if (priv->file) {
if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
&low, &high))
print_cpu_data(seq, priv->reg, low, high);
return;
}
}
range = get_cpu_range_count(cpu);
for (i = 0; i < range; i++) {
if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
continue;
for (msr = msr_min; msr <= msr_max; msr++) {
if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
continue;
print_cpu_data(seq, msr, low, high);
}
}
}
static void print_tss(void *arg)
{
struct pt_regs *regs = task_pt_regs(current);
struct seq_file *seq = arg;
unsigned int seg;
seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
seq_printf(seq, " RDX\t: %016lx\n", regs->dx);
seq_printf(seq, " RSI\t: %016lx\n", regs->si);
seq_printf(seq, " RDI\t: %016lx\n", regs->di);
seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
seq_printf(seq, " ESP\t: %016lx\n", regs->sp);
#ifdef CONFIG_X86_64
seq_printf(seq, " R08\t: %016lx\n", regs->r8);
seq_printf(seq, " R09\t: %016lx\n", regs->r9);
seq_printf(seq, " R10\t: %016lx\n", regs->r10);
seq_printf(seq, " R11\t: %016lx\n", regs->r11);
seq_printf(seq, " R12\t: %016lx\n", regs->r12);
seq_printf(seq, " R13\t: %016lx\n", regs->r13);
seq_printf(seq, " R14\t: %016lx\n", regs->r14);
seq_printf(seq, " R15\t: %016lx\n", regs->r15);
#endif
asm("movl %%cs,%0" : "=r" (seg));
seq_printf(seq, " CS\t: %04x\n", seg);
asm("movl %%ds,%0" : "=r" (seg));
seq_printf(seq, " DS\t: %04x\n", seg);
seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff);
asm("movl %%es,%0" : "=r" (seg));
seq_printf(seq, " ES\t: %04x\n", seg);
asm("movl %%fs,%0" : "=r" (seg));
seq_printf(seq, " FS\t: %04x\n", seg);
asm("movl %%gs,%0" : "=r" (seg));
seq_printf(seq, " GS\t: %04x\n", seg);
seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);
seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
}
static void print_cr(void *arg)
{
struct seq_file *seq = arg;
seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
#ifdef CONFIG_X86_64
seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
#endif
}
static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
{
seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
}
static void print_dt(void *seq)
{
struct desc_ptr dt;
unsigned long ldt;
/* IDT */
store_idt((struct desc_ptr *)&dt);
print_desc_ptr("IDT", seq, dt);
/* GDT */
store_gdt((struct desc_ptr *)&dt);
print_desc_ptr("GDT", seq, dt);
/* LDT */
store_ldt(ldt);
seq_printf(seq, " LDT\t: %016lx\n", ldt);
/* TR */
store_tr(ldt);
seq_printf(seq, " TR\t: %016lx\n", ldt);
}
static void print_dr(void *arg)
{
struct seq_file *seq = arg;
unsigned long dr;
int i;
for (i = 0; i < 8; i++) {
/* Ignore db4, db5 */
if ((i == 4) || (i == 5))
continue;
get_debugreg(dr, i);
seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
}
seq_printf(seq, "\n MSR\t:\n");
}
static void print_apic(void *arg)
{
struct seq_file *seq = arg;
#ifdef CONFIG_X86_LOCAL_APIC
seq_printf(seq, " LAPIC\t:\n");
seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24);
seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR));
seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI));
seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI));
seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI));
seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR));
seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR));
seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV));
seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR));
seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR));
seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR));
seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2));
seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT));
seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR));
seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC));
seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0));
seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1));
seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR));
seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT));
seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT));
seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR));
#endif /* CONFIG_X86_LOCAL_APIC */
seq_printf(seq, "\n MSR\t:\n");
}
static int cpu_seq_show(struct seq_file *seq, void *v)
{
struct cpu_private *priv = seq->private;
if (priv == NULL)
return -EINVAL;
switch (cpu_base[priv->type].flag) {
case CPU_TSS:
smp_call_function_single(priv->cpu, print_tss, seq, 1);
break;
case CPU_CR:
smp_call_function_single(priv->cpu, print_cr, seq, 1);
break;
case CPU_DT:
smp_call_function_single(priv->cpu, print_dt, seq, 1);
break;
case CPU_DEBUG:
if (priv->file == CPU_INDEX_BIT)
smp_call_function_single(priv->cpu, print_dr, seq, 1);
print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
break;
case CPU_APIC:
if (priv->file == CPU_INDEX_BIT)
smp_call_function_single(priv->cpu, print_apic, seq, 1);
print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
break;
default:
print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
break;
}
seq_printf(seq, "\n");
return 0;
}
static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
if (*pos == 0) /* One time is enough ;-) */
return seq;
return NULL;
}
static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
(*pos)++;
return cpu_seq_start(seq, pos);
}
static void cpu_seq_stop(struct seq_file *seq, void *v)
{
}
static const struct seq_operations cpu_seq_ops = {
.start = cpu_seq_start,
.next = cpu_seq_next,
.stop = cpu_seq_stop,
.show = cpu_seq_show,
};
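
These four callbacks implement a single-record seq_file: start() yields a token only at position 0 and next() bumps the position and re-asks start(), so show() runs exactly once per read. A userspace simulation of that iterator protocol:

#include <stdio.h>

static void *start(long *pos) { return *pos == 0 ? (void *)1 : NULL; }
static void *next(long *pos) { (*pos)++; return start(pos); }

int main(void)
{
	long pos = 0;
	void *v;

	for (v = start(&pos); v; v = next(&pos))
		printf("show() called at pos %ld\n", pos);	/* runs once */
	return 0;
}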
static int cpu_seq_open(struct inode *inode, struct file *file)
{
struct cpu_private *priv = inode->i_private;
struct seq_file *seq;
int err;
err = seq_open(file, &cpu_seq_ops);
if (!err) {
seq = file->private_data;
seq->private = priv;
}
return err;
}
static int write_msr(struct cpu_private *priv, u64 val)
{
u32 low, high;
high = (val >> 32) & 0xffffffff;
low = val & 0xffffffff;
if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
return 0;
return -EPERM;
}
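
MSRs are architecturally 64 bits but written as two 32-bit halves (classically edx:eax), hence the split above. In isolation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0x123456789abcdef0ULL;
	uint32_t high = (val >> 32) & 0xffffffff;
	uint32_t low  = val & 0xffffffff;

	printf("high %08x low %08x\n", high, low);	/* 12345678 9abcdef0 */
	return 0;
}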
static int write_cpu_register(struct cpu_private *priv, const char *buf)
{
int ret = -EPERM;
u64 val;
ret = strict_strtoull(buf, 0, &val);
if (ret < 0)
return ret;
/* Supporting only MSRs */
if (priv->type < CPU_TSS_BIT)
return write_msr(priv, val);
return ret;
}
static ssize_t cpu_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *off)
{
struct seq_file *seq = file->private_data;
struct cpu_private *priv = seq->private;
char buf[19];
if ((priv == NULL) || (count >= sizeof(buf)))
return -EINVAL;
if (copy_from_user(&buf, ubuf, count))
return -EFAULT;
buf[count] = 0;
if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
if (!write_cpu_register(priv, buf))
return count;
return -EACCES;
}
static const struct file_operations cpu_fops = {
.owner = THIS_MODULE,
.open = cpu_seq_open,
.read = seq_read,
.write = cpu_write,
.llseek = seq_lseek,
.release = seq_release,
};
static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
unsigned file, struct dentry *dentry)
{
struct cpu_private *priv = NULL;
/* Already initialized */
if (file == CPU_INDEX_BIT)
if (per_cpu(cpu_arr[type].init, cpu))
return 0;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
priv->cpu = cpu;
priv->type = type;
priv->reg = reg;
priv->file = file;
mutex_lock(&cpu_debug_lock);
per_cpu(priv_arr[type], cpu) = priv;
per_cpu(cpu_priv_count, cpu)++;
mutex_unlock(&cpu_debug_lock);
if (file)
debugfs_create_file(cpu_file[file].name, S_IRUGO,
dentry, (void *)priv, &cpu_fops);
else {
debugfs_create_file(cpu_base[type].name, S_IRUGO,
per_cpu(cpu_arr[type].dentry, cpu),
(void *)priv, &cpu_fops);
mutex_lock(&cpu_debug_lock);
per_cpu(cpu_arr[type].init, cpu) = 1;
mutex_unlock(&cpu_debug_lock);
}
return 0;
}
static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
struct dentry *dentry)
{
unsigned file;
int err = 0;
for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
err = cpu_create_file(cpu, type, reg, file, dentry);
if (err)
return err;
}
return err;
}
static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
{
struct dentry *cpu_dentry = NULL;
unsigned reg, reg_min, reg_max;
int i, range, err = 0;
char reg_dir[12];
u32 low, high;
range = get_cpu_range_count(cpu);
for (i = 0; i < range; i++) {
if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
cpu_base[type].flag))
continue;
for (reg = reg_min; reg <= reg_max; reg++) {
if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
continue;
sprintf(reg_dir, "0x%x", reg);
cpu_dentry = debugfs_create_dir(reg_dir, dentry);
err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
if (err)
return err;
}
}
return err;
}
static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
{
struct dentry *cpu_dentry = NULL;
unsigned type;
int err = 0;
for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
if (!is_typeflag_valid(cpu, cpu_base[type].flag))
continue;
cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
if (type < CPU_TSS_BIT)
err = cpu_init_msr(cpu, type, cpu_dentry);
else
err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
cpu_dentry);
if (err)
return err;
}
return err;
}
static int cpu_init_cpu(void)
{
struct dentry *cpu_dentry = NULL;
struct cpuinfo_x86 *cpui;
char cpu_dir[12];
unsigned cpu;
int err = 0;
for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
cpui = &cpu_data(cpu);
if (!cpu_has(cpui, X86_FEATURE_MSR))
continue;
per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) |
(cpui->x86 << 8) |
(cpui->x86_model));
per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu);
sprintf(cpu_dir, "cpu%d", cpu);
cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
err = cpu_init_allreg(cpu, cpu_dentry);
pr_info("cpu%d(%d) debug files %d\n",
cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
pr_err("Register files count %d exceeds limit %d\n",
per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
err = -ENFILE;
}
if (err)
return err;
}
return err;
}
static int __init cpu_debug_init(void)
{
cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);
return cpu_init_cpu();
}
static void __exit cpu_debug_exit(void)
{
int i, cpu;
if (cpu_debugfs_dir)
debugfs_remove_recursive(cpu_debugfs_dir);
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
kfree(per_cpu(priv_arr[i], cpu));
}
module_init(cpu_debug_init);
module_exit(cpu_debug_exit);
MODULE_AUTHOR("Jaswinder Singh Rajput");
MODULE_DESCRIPTION("CPU Debug module");
MODULE_LICENSE("GPL");


@@ -87,30 +87,15 @@ config X86_POWERNOW_K7_ACPI
config X86_POWERNOW_K8
tristate "AMD Opteron/Athlon64 PowerNow!"
select CPU_FREQ_TABLE
depends on ACPI && ACPI_PROCESSOR
help
This adds the CPUFreq driver for mobile AMD Opteron/Athlon64 processors.
This adds the CPUFreq driver for K8/K10 Opteron/Athlon64 processors.
To compile this driver as a module, choose M here: the
module will be called powernow-k8.
For details, take a look at <file:Documentation/cpu-freq/>.
If in doubt, say N.
config X86_POWERNOW_K8_ACPI
bool
prompt "ACPI Support" if X86_32
depends on ACPI && X86_POWERNOW_K8 && ACPI_PROCESSOR
depends on !(X86_POWERNOW_K8 = y && ACPI_PROCESSOR = m)
default y
help
This provides access to the K8's Processor Performance States via ACPI.
This driver is probably required for CPUFreq to work with multi-socket and
SMP systems. It is not required on at least some single-socket yet
multi-core systems, even if SMP is enabled.
It is safe to say Y here.
config X86_GX_SUSPMOD
tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
depends on X86_32 && PCI


@@ -1,6 +1,11 @@
# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
# K8 systems. ACPI is preferred to all other hardware-specific drivers.
# speedstep-* is preferred over p4-clockmod.
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_LONGHAUL) += longhaul.o
obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o
obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o
@@ -10,7 +15,6 @@ obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o
obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o


@@ -1,5 +1,5 @@
/*
* acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.4 $)
* acpi-cpufreq.c - ACPI Processor P-States Driver
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
@@ -36,16 +36,18 @@
#include <trace/power.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <acpi/processor.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
"acpi-cpufreq", msg)
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
@@ -97,7 +99,7 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
perf = data->acpi_data;
for (i=0; i<perf->state_count; i++) {
for (i = 0; i < perf->state_count; i++) {
if (value == perf->states[i].status)
return data->freq_table[i].frequency;
}
@@ -112,7 +114,7 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
msr &= INTEL_MSR_RANGE;
perf = data->acpi_data;
for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
if (msr == perf->states[data->freq_table[i].index].status)
return data->freq_table[i].frequency;
}
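
The scan above walks a sentinel-terminated frequency table, mapping the raw status value read from the hardware back to a frequency in kHz. A standalone sketch of the same lookup; CPUFREQ_TABLE_END's value and the table contents are illustrative:

#include <stdio.h>

#define CPUFREQ_TABLE_END (~0u)	/* sentinel value assumed */

struct freq_entry { unsigned frequency; unsigned status; };

static unsigned lookup(const struct freq_entry *t, unsigned status)
{
	int i;

	for (i = 0; t[i].frequency != CPUFREQ_TABLE_END; i++)
		if (t[i].status == status)
			return t[i].frequency;
	return 0;	/* no match: caller falls back elsewhere */
}

int main(void)
{
	struct freq_entry table[] = {
		{ 2400000, 0x0a },
		{ 1600000, 0x07 },
		{ CPUFREQ_TABLE_END, 0 },
	};
	printf("%u kHz\n", lookup(table, 0x07));	/* 1600000 kHz */
	return 0;
}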
@@ -140,15 +142,13 @@ struct io_addr {
u8 bit_width;
};
typedef union {
struct msr_addr msr;
struct io_addr io;
} drv_addr_union;
struct drv_cmd {
unsigned int type;
const struct cpumask *mask;
drv_addr_union addr;
union {
struct msr_addr msr;
struct io_addr io;
} addr;
u32 val;
};
@@ -371,7 +371,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
unsigned int cur_freq;
unsigned int i;
for (i=0; i<100; i++) {
for (i = 0; i < 100; i++) {
cur_freq = extract_freq(get_cur_val(mask), data);
if (cur_freq == freq)
return 1;
@@ -496,7 +496,7 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
unsigned long freq;
unsigned long freqn = perf->states[0].core_frequency * 1000;
for (i=0; i<(perf->state_count-1); i++) {
for (i = 0; i < (perf->state_count-1); i++) {
freq = freqn;
freqn = perf->states[i+1].core_frequency * 1000;
if ((2 * cpu_khz) > (freqn + freq)) {
@@ -675,17 +675,29 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* detect transition latency */
policy->cpuinfo.transition_latency = 0;
for (i=0; i<perf->state_count; i++) {
for (i = 0; i < perf->state_count; i++) {
if ((perf->states[i].transition_latency * 1000) >
policy->cpuinfo.transition_latency)
policy->cpuinfo.transition_latency =
perf->states[i].transition_latency * 1000;
}
/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
policy->cpuinfo.transition_latency > 20 * 1000) {
static int print_once;
policy->cpuinfo.transition_latency = 20 * 1000;
if (!print_once) {
print_once = 1;
printk(KERN_INFO "Capping off P-state tranision latency"
" at 20 uS\n");
}
}
data->max_freq = perf->states[0].core_frequency * 1000;
/* table init */
for (i=0; i<perf->state_count; i++) {
if (i>0 && perf->states[i].core_frequency >=
for (i = 0; i < perf->state_count; i++) {
if (i > 0 && perf->states[i].core_frequency >=
data->freq_table[valid_states-1].frequency / 1000)
continue;
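Both extract_io() and extract_msr(), whose loops are reformatted above, share one pattern: mask the status field out of the raw value, then walk the driver's frequency table until an entry's stored status matches. A minimal standalone sketch of that lookup; the table values are invented, and the 0xffff mask assumes the driver's INTEL_MSR_RANGE:

#include <stdio.h>

#define INTEL_MSR_RANGE 0xffff	/* assumed status mask, as in the driver */
#define TABLE_END 0xffffffffu	/* stand-in for CPUFREQ_TABLE_END */

struct freq_entry {
	unsigned int status;	/* value the MSR reports for this P-state */
	unsigned int khz;
};

/* Hypothetical P-state table. */
static const struct freq_entry table[] = {
	{ 0x0a1a, 2600000 },
	{ 0x0815, 2100000 },
	{ 0x0611, 1600000 },
	{ 0x0000, TABLE_END },
};

static unsigned int extract_khz(unsigned int msr)
{
	unsigned int i;

	msr &= INTEL_MSR_RANGE;	/* keep only the status field */
	for (i = 0; table[i].khz != TABLE_END; i++)
		if (msr == table[i].status)
			return table[i].khz;
	return 0;		/* unknown P-state */
}

int main(void)
{
	printf("%u kHz\n", extract_khz(0x00000815));	/* prints 2100000 */
	return 0;
}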

View file

@@ -32,7 +32,7 @@
* nforce2_chipset:
* FSB is changed using the chipset
*/
static struct pci_dev *nforce2_chipset_dev;
static struct pci_dev *nforce2_dev;
/* fid:
* multiplier * 10
@@ -56,7 +56,9 @@ MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)");
MODULE_PARM_DESC(min_fsb,
"Minimum FSB to use, if not defined: current FSB - 50");
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "cpufreq-nforce2", msg)
#define PFX "cpufreq-nforce2: "
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
"cpufreq-nforce2", msg)
/**
* nforce2_calc_fsb - calculate FSB
@@ -118,11 +120,11 @@ static void nforce2_write_pll(int pll)
int temp;
/* Set the pll addr. to 0x00 */
pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLADR, 0);
pci_write_config_dword(nforce2_dev, NFORCE2_PLLADR, 0);
/* Now write the value in all 64 registers */
for (temp = 0; temp <= 0x3f; temp++)
pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLREG, pll);
pci_write_config_dword(nforce2_dev, NFORCE2_PLLREG, pll);
return;
}
@@ -139,8 +141,8 @@ static unsigned int nforce2_fsb_read(int bootfsb)
u32 fsb, temp = 0;
/* Get chipset boot FSB from subdevice 5 (FSB at boot-time) */
nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
0x01EF, PCI_ANY_ID, PCI_ANY_ID, NULL);
nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, 0x01EF,
PCI_ANY_ID, PCI_ANY_ID, NULL);
if (!nforce2_sub5)
return 0;
@@ -148,13 +150,13 @@ static unsigned int nforce2_fsb_read(int bootfsb)
fsb /= 1000000;
/* Check if PLL register is already set */
pci_read_config_byte(nforce2_chipset_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
if (bootfsb || !temp)
return fsb;
/* Use PLL register FSB value */
pci_read_config_dword(nforce2_chipset_dev, NFORCE2_PLLREG, &temp);
pci_read_config_dword(nforce2_dev, NFORCE2_PLLREG, &temp);
fsb = nforce2_calc_fsb(temp);
return fsb;
@@ -174,18 +176,18 @@ static int nforce2_set_fsb(unsigned int fsb)
int pll = 0;
if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) {
printk(KERN_ERR "cpufreq: FSB %d is out of range!\n", fsb);
printk(KERN_ERR PFX "FSB %d is out of range!\n", fsb);
return -EINVAL;
}
tfsb = nforce2_fsb_read(0);
if (!tfsb) {
printk(KERN_ERR "cpufreq: Error while reading the FSB\n");
printk(KERN_ERR PFX "Error while reading the FSB\n");
return -EINVAL;
}
/* First write? Then set actual value */
pci_read_config_byte(nforce2_chipset_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
if (!temp) {
pll = nforce2_calc_pll(tfsb);
@@ -197,7 +199,7 @@ static int nforce2_set_fsb(unsigned int fsb)
/* Enable write access */
temp = 0x01;
pci_write_config_byte(nforce2_chipset_dev, NFORCE2_PLLENABLE, (u8)temp);
pci_write_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8)temp);
diff = tfsb - fsb;
@@ -222,7 +224,7 @@ static int nforce2_set_fsb(unsigned int fsb)
}
temp = 0x40;
pci_write_config_byte(nforce2_chipset_dev, NFORCE2_PLLADR, (u8)temp);
pci_write_config_byte(nforce2_dev, NFORCE2_PLLADR, (u8)temp);
return 0;
}
@@ -244,7 +246,8 @@ static unsigned int nforce2_get(unsigned int cpu)
* nforce2_target - set a new CPUFreq policy
* @policy: new policy
* @target_freq: the target frequency
* @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
* @relation: how that frequency relates to achieved frequency
* (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
*
* Sets a new CPUFreq policy.
*/
@@ -276,7 +279,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
/* local_irq_save(flags); */
if (nforce2_set_fsb(target_fsb) < 0)
printk(KERN_ERR "cpufreq: Changing FSB to %d failed\n",
printk(KERN_ERR PFX "Changing FSB to %d failed\n",
target_fsb);
else
dprintk("Changed FSB successfully to %d\n",
@@ -327,8 +330,8 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
/* FIX: Get FID from CPU */
if (!fid) {
if (!cpu_khz) {
printk(KERN_WARNING
"cpufreq: cpu_khz not set, can't calculate multiplier!\n");
printk(KERN_WARNING PFX
"cpu_khz not set, can't calculate multiplier!\n");
return -ENODEV;
}
@@ -343,7 +346,7 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
}
}
printk(KERN_INFO "cpufreq: FSB currently at %i MHz, FID %d.%d\n", fsb,
printk(KERN_INFO PFX "FSB currently at %i MHz, FID %d.%d\n", fsb,
fid / 10, fid % 10);
/* Set maximum FSB to FSB at boot time */
@@ -392,17 +395,18 @@ static struct cpufreq_driver nforce2_driver = {
*/
static unsigned int nforce2_detect_chipset(void)
{
nforce2_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
nforce2_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_NFORCE2,
PCI_ANY_ID, PCI_ANY_ID, NULL);
if (nforce2_chipset_dev == NULL)
if (nforce2_dev == NULL)
return -ENODEV;
printk(KERN_INFO "cpufreq: Detected nForce2 chipset revision %X\n",
nforce2_chipset_dev->revision);
printk(KERN_INFO
"cpufreq: FSB changing is maybe unstable and can lead to crashes and data loss.\n");
printk(KERN_INFO PFX "Detected nForce2 chipset revision %X\n",
nforce2_dev->revision);
printk(KERN_INFO PFX
"FSB changing is maybe unstable and can lead to "
"crashes and data loss.\n");
return 0;
}
@@ -420,7 +424,7 @@ static int __init nforce2_init(void)
/* detect chipset */
if (nforce2_detect_chipset()) {
printk(KERN_ERR "cpufreq: No nForce2 chipset.\n");
printk(KERN_INFO PFX "No nForce2 chipset.\n");
return -ENODEV;
}
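Besides the nforce2_chipset_dev rename, the recurring change in this file is routing messages through a PFX define and a wrapped dprintk macro so every log line carries the driver name. A small userspace sketch of the same variadic-macro pattern; fprintf stands in for the kernel-only printk/cpufreq_debug_printk, and msg... is the GCC-style variadic form the kernel uses:

#include <stdio.h>

#define PFX "cpufreq-nforce2: "
/* GCC-style variadic macro, mirroring the kernel's dprintk(msg...);
 * string-literal concatenation splices the prefix in front. */
#define dprintk(msg...) fprintf(stderr, PFX msg)

int main(void)
{
	int fsb = 166;	/* made-up FSB reading */

	printf(PFX "FSB currently at %i MHz\n", fsb);
	dprintk("Changed FSB successfully to %d\n", fsb);
	return 0;
}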

View file

@@ -12,12 +12,12 @@
#include <linux/cpufreq.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/timex.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <asm/msr.h>
#include <asm/tsc.h>
#include <asm/timex.h>
#include <asm/io.h>
#include <asm/delay.h>
#define EPS_BRAND_C7M 0
#define EPS_BRAND_C7 1
@@ -184,7 +184,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
break;
}
switch(brand) {
switch (brand) {
case EPS_BRAND_C7M:
printk(KERN_CONT "C7-M\n");
break;
@@ -218,17 +218,20 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Print voltage and multiplier */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
current_voltage = lo & 0xff;
printk(KERN_INFO "eps: Current voltage = %dmV\n", current_voltage * 16 + 700);
printk(KERN_INFO "eps: Current voltage = %dmV\n",
current_voltage * 16 + 700);
current_multiplier = (lo >> 8) & 0xff;
printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier);
/* Print limits */
max_voltage = hi & 0xff;
printk(KERN_INFO "eps: Highest voltage = %dmV\n", max_voltage * 16 + 700);
printk(KERN_INFO "eps: Highest voltage = %dmV\n",
max_voltage * 16 + 700);
max_multiplier = (hi >> 8) & 0xff;
printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier);
min_voltage = (hi >> 16) & 0xff;
printk(KERN_INFO "eps: Lowest voltage = %dmV\n", min_voltage * 16 + 700);
printk(KERN_INFO "eps: Lowest voltage = %dmV\n",
min_voltage * 16 + 700);
min_multiplier = (hi >> 24) & 0xff;
printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier);
@@ -318,7 +321,7 @@ static int eps_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
static struct freq_attr* eps_attr[] = {
static struct freq_attr *eps_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
@@ -356,7 +359,7 @@ static void __exit eps_exit(void)
cpufreq_unregister_driver(&eps_driver);
}
MODULE_AUTHOR("Rafa<EFBFBD> Bilski <rafalbilski@interia.pl>");
MODULE_AUTHOR("Rafal Bilski <rafalbilski@interia.pl>");
MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPU's.");
MODULE_LICENSE("GPL");
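The rewrapped printk calls all decode one packed register layout: the driver reads MSR_IA32_PERF_STATUS and takes the VID from the low byte (mV = vid * 16 + 700), the multiplier from the next byte, and the limits from the high word. A standalone sketch of that decode; the field layout follows the driver, the sample register value is invented:

#include <stdio.h>
#include <stdint.h>

/* IA32_PERF_STATUS as e_powersaver reads it:
 *   lo bits  7:0  current VID -> mV = vid * 16 + 700
 *   lo bits 15:8  current multiplier
 *   hi bits  7:0  highest VID, hi bits 23:16 lowest VID */
static void decode_perf_status(uint32_t lo, uint32_t hi)
{
	unsigned int cur_mv   = (lo & 0xff) * 16 + 700;
	unsigned int cur_mult = (lo >> 8) & 0xff;
	unsigned int max_mv   = (hi & 0xff) * 16 + 700;
	unsigned int min_mv   = ((hi >> 16) & 0xff) * 16 + 700;

	printf("eps: Current voltage = %umV, multiplier = %u\n",
	       cur_mv, cur_mult);
	printf("eps: Highest voltage = %umV, Lowest voltage = %umV\n",
	       max_mv, min_mv);
}

int main(void)
{
	decode_perf_status(0x00000a2c, 0x00100c30);	/* invented readout */
	return 0;
}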

View file

@@ -184,7 +184,8 @@ static int elanfreq_target(struct cpufreq_policy *policy,
{
unsigned int newstate = 0;
if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], target_freq, relation, &newstate))
if (cpufreq_frequency_table_target(policy, &elanfreq_table[0],
target_freq, relation, &newstate))
return -EINVAL;
elanfreq_set_cpu_state(newstate);
@@ -301,7 +302,8 @@ static void __exit elanfreq_exit(void)
module_param(max_freq, int, 0444);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, Sven Geggus <sven@geggus.net>");
MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, "
"Sven Geggus <sven@geggus.net>");
MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs");
module_init(elanfreq_init);
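The call wrapped above, like several in the files that follow, goes through cpufreq_frequency_table_target(), which picks the table entry closest to the requested frequency subject to the relation: CPUFREQ_RELATION_L wants the highest frequency at or below the target, CPUFREQ_RELATION_H the lowest at or above it. A simplified sketch of that selection rule, with an invented table and no CPUFREQ_ENTRY_INVALID handling:

#include <stdio.h>

#define TABLE_END 0xffffffffu
enum relation { RELATION_L, RELATION_H };

static const unsigned int freqs[] = { 1000, 2000, 4000, 8000, TABLE_END };

/* Return the index of the best table entry for target (kHz), -1 if none. */
static int table_target(unsigned int target, enum relation rel)
{
	int i, best = -1;

	for (i = 0; freqs[i] != TABLE_END; i++) {
		unsigned int f = freqs[i];

		if (rel == RELATION_L) {	/* at or below, as high as possible */
			if (f <= target && (best < 0 || f > freqs[best]))
				best = i;
		} else {			/* at or above, as low as possible */
			if (f >= target && (best < 0 || f < freqs[best]))
				best = i;
		}
	}
	return best;
}

int main(void)
{
	printf("L: %d H: %d\n", table_target(3000, RELATION_L),
	       table_target(3000, RELATION_H));	/* prints indexes 1 and 2 */
	return 0;
}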

View file

@@ -79,8 +79,9 @@
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <asm/processor-cyrix.h>
#include <asm/errno.h>
/* PCI config registers, all at F0 */
#define PCI_PMER1 0x80 /* power management enable register 1 */
@@ -122,8 +123,8 @@ static struct gxfreq_params *gx_params;
static int stock_freq;
/* PCI bus clock - defaults to 30.000 if cpu_khz is not available */
static int pci_busclk = 0;
module_param (pci_busclk, int, 0444);
static int pci_busclk;
module_param(pci_busclk, int, 0444);
/* maximum duration for which the cpu may be suspended
* (32us * MAX_DURATION). If no parameter is given, this defaults
@@ -132,7 +133,7 @@ module_param (pci_busclk, int, 0444);
* is suspended -- processing power is just 0.39% of what it used to be,
* though. 781.25 kHz(!) for a 200 MHz processor -- wow. */
static int max_duration = 255;
module_param (max_duration, int, 0444);
module_param(max_duration, int, 0444);
/* For the default policy, we want at least some processing power
* - let's say 5%. (min = maxfreq / POLICY_MIN_DIV)
@@ -140,7 +141,8 @@ module_param (max_duration, int, 0444);
#define POLICY_MIN_DIV 20
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "gx-suspmod", msg)
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
"gx-suspmod", msg)
/**
* we can detect a core multiplier from dir0_lsb
@@ -166,12 +168,20 @@ static int gx_freq_mult[16] = {
* Low Level chipset interface *
****************************************************************/
static struct pci_device_id gx_chipset_tbl[] __initdata = {
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510,
PCI_ANY_ID, PCI_ANY_ID },
{ 0, },
};
static void gx_write_byte(int reg, int value)
{
pci_write_config_byte(gx_params->cs55x0, reg, value);
}
/**
* gx_detect_chipset:
*
@@ -200,7 +210,8 @@ static __init struct pci_dev *gx_detect_chipset(void)
/**
* gx_get_cpuspeed:
*
* Finds out at which efficient frequency the Cyrix MediaGX/NatSemi Geode CPU runs.
* Finds out at which effective frequency the Cyrix MediaGX/NatSemi
* Geode CPU runs.
*/
static unsigned int gx_get_cpuspeed(unsigned int cpu)
{
@@ -217,17 +228,18 @@ static unsigned int gx_get_cpuspeed(unsigned int cpu)
*
**/
static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off_duration)
static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration,
u8 *off_duration)
{
unsigned int i;
u8 tmp_on, tmp_off;
int old_tmp_freq = stock_freq;
int tmp_freq;
*off_duration=1;
*on_duration=0;
*off_duration = 1;
*on_duration = 0;
for (i=max_duration; i>0; i--) {
for (i = max_duration; i > 0; i--) {
tmp_off = ((khz * i) / stock_freq) & 0xff;
tmp_on = i - tmp_off;
tmp_freq = (stock_freq * tmp_off) / i;
@@ -259,26 +271,34 @@ static void gx_set_cpuspeed(unsigned int khz)
freqs.cpu = 0;
freqs.old = gx_get_cpuspeed(0);
new_khz = gx_validate_speed(khz, &gx_params->on_duration, &gx_params->off_duration);
new_khz = gx_validate_speed(khz, &gx_params->on_duration,
&gx_params->off_duration);
freqs.new = new_khz;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
local_irq_save(flags);
if (new_khz != stock_freq) { /* if new khz == 100% of CPU speed, it is special case */
if (new_khz != stock_freq) {
/* if new khz == 100% of CPU speed, it is special case */
switch (gx_params->cs55x0->device) {
case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP;
/* FIXME: need to test other values -- Zwane,Miura */
pci_write_config_byte(gx_params->cs55x0, PCI_IRQTC, 4); /* typical 2 to 4ms */
pci_write_config_byte(gx_params->cs55x0, PCI_VIDTC, 100);/* typical 50 to 100ms */
pci_write_config_byte(gx_params->cs55x0, PCI_PMER1, pmer1);
/* typical 2 to 4ms */
gx_write_byte(PCI_IRQTC, 4);
/* typical 50 to 100ms */
gx_write_byte(PCI_VIDTC, 100);
gx_write_byte(PCI_PMER1, pmer1);
if (gx_params->cs55x0->revision < 0x10) { /* CS5530(rev 1.2, 1.3) */
suscfg = gx_params->pci_suscfg | SUSMOD;
} else { /* CS5530A,B.. */
suscfg = gx_params->pci_suscfg | SUSMOD | PWRSVE;
if (gx_params->cs55x0->revision < 0x10) {
/* CS5530(rev 1.2, 1.3) */
suscfg = gx_params->pci_suscfg|SUSMOD;
} else {
/* CS5530A,B.. */
suscfg = gx_params->pci_suscfg|SUSMOD|PWRSVE;
}
break;
case PCI_DEVICE_ID_CYRIX_5520:
@@ -294,13 +314,13 @@ static void gx_set_cpuspeed(unsigned int khz)
suscfg = gx_params->pci_suscfg & ~(SUSMOD);
gx_params->off_duration = 0;
gx_params->on_duration = 0;
dprintk("suspend modulation disabled: cpu runs 100 percent speed.\n");
dprintk("suspend modulation disabled: cpu runs 100%% speed.\n");
}
pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration);
pci_write_config_byte(gx_params->cs55x0, PCI_MODON, gx_params->on_duration);
gx_write_byte(PCI_MODOFF, gx_params->off_duration);
gx_write_byte(PCI_MODON, gx_params->on_duration);
pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg);
gx_write_byte(PCI_SUSCFG, suscfg);
pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg);
local_irq_restore(flags);
@@ -334,7 +354,8 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy)
return -EINVAL;
policy->cpu = 0;
cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq);
cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
stock_freq);
/* it needs to be assured that at least one supported frequency is
* within policy->min and policy->max. If it is not, policy->max
@@ -354,7 +375,8 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy)
policy->max = tmp_freq;
if (policy->max < policy->min)
policy->max = policy->min;
cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq);
cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
stock_freq);
return 0;
}
@@ -398,18 +420,18 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
/* determine maximum frequency */
if (pci_busclk) {
if (pci_busclk)
maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
} else if (cpu_khz) {
else if (cpu_khz)
maxfreq = cpu_khz;
} else {
else
maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
}
stock_freq = maxfreq;
curfreq = gx_get_cpuspeed(0);
dprintk("cpu max frequency is %d.\n", maxfreq);
dprintk("cpu current frequency is %dkHz.\n",curfreq);
dprintk("cpu current frequency is %dkHz.\n", curfreq);
/* setup basic struct for cpufreq API */
policy->cpu = 0;
@@ -447,7 +469,8 @@ static int __init cpufreq_gx_init(void)
struct pci_dev *gx_pci;
/* Test if we have the right hardware */
if ((gx_pci = gx_detect_chipset()) == NULL)
gx_pci = gx_detect_chipset();
if (gx_pci == NULL)
return -ENODEV;
/* check whether module parameters are sane */
@@ -468,9 +491,11 @@ static int __init cpufreq_gx_init(void)
pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1));
pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2));
pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration));
pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration));
pci_read_config_byte(params->cs55x0, PCI_MODOFF,
&(params->off_duration));
if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) {
ret = cpufreq_register_driver(&gx_suspmod_driver);
if (ret) {
kfree(params);
return ret; /* register error! */
}
@@ -485,9 +510,9 @@ static void __exit cpufreq_gx_exit(void)
kfree(gx_params);
}
MODULE_AUTHOR ("Hiroshi Miura <miura@da-cha.org>");
MODULE_DESCRIPTION ("Cpufreq driver for Cyrix MediaGX and NatSemi Geode");
MODULE_LICENSE ("GPL");
MODULE_AUTHOR("Hiroshi Miura <miura@da-cha.org>");
MODULE_DESCRIPTION("Cpufreq driver for Cyrix MediaGX and NatSemi Geode");
MODULE_LICENSE("GPL");
module_init(cpufreq_gx_init);
module_exit(cpufreq_gx_exit);
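gx_validate_speed() hunts for suspend-modulation durations whose duty cycle approximates the requested speed: for a period of i ticks it derives a byte-sized count off = (khz * i) / stock_freq and an effective frequency stock_freq * off / i. A simplified brute-force sketch of that search; clock values are invented and the real loop's early-exit bookkeeping is omitted:

#include <stdio.h>

int main(void)
{
	unsigned int stock_freq = 200000;	/* kHz, assumed full speed */
	unsigned int khz = 150000;		/* assumed requested speed */
	unsigned int max_duration = 255;
	unsigned int i, best_i = 0, best_freq = 0, best_delta = ~0u;

	for (i = max_duration; i > 0; i--) {
		unsigned int off = ((khz * i) / stock_freq) & 0xff;
		unsigned int freq = (stock_freq * off) / i;
		unsigned int delta = khz > freq ? khz - freq : freq - khz;

		if (delta < best_delta) {
			best_delta = delta;
			best_freq = freq;
			best_i = i;
		}
	}
	printf("period %u ticks -> %u kHz (wanted %u)\n",
	       best_i, best_freq, khz);
	return 0;
}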

View file

@@ -30,12 +30,12 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/io.h>
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <asm/msr.h>
#include <asm/timex.h>
#include <asm/io.h>
#include <asm/acpi.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include "longhaul.h"
@@ -58,7 +58,7 @@
#define USE_NORTHBRIDGE (1 << 2)
static int cpu_model;
static unsigned int numscales=16;
static unsigned int numscales = 16;
static unsigned int fsb;
static const struct mV_pos *vrm_mV_table;
@@ -67,8 +67,8 @@ static const unsigned char *mV_vrm_table;
static unsigned int highest_speed, lowest_speed; /* kHz */
static unsigned int minmult, maxmult;
static int can_scale_voltage;
static struct acpi_processor *pr = NULL;
static struct acpi_processor_cx *cx = NULL;
static struct acpi_processor *pr;
static struct acpi_processor_cx *cx;
static u32 acpi_regs_addr;
static u8 longhaul_flags;
static unsigned int longhaul_index;
@@ -78,12 +78,13 @@ static int scale_voltage;
static int disable_acpi_c3;
static int revid_errata;
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg)
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
"longhaul", msg)
/* Clock ratios multiplied by 10 */
static int clock_ratio[32];
static int eblcr_table[32];
static int mults[32];
static int eblcr[32];
static int longhaul_version;
static struct cpufreq_frequency_table *longhaul_table;
@@ -93,7 +94,7 @@ static char speedbuffer[8];
static char *print_speed(int speed)
{
if (speed < 1000) {
snprintf(speedbuffer, sizeof(speedbuffer),"%dMHz", speed);
snprintf(speedbuffer, sizeof(speedbuffer), "%dMHz", speed);
return speedbuffer;
}
@@ -122,27 +123,28 @@ static unsigned int calc_speed(int mult)
static int longhaul_get_cpu_mult(void)
{
unsigned long invalue=0,lo, hi;
unsigned long invalue = 0, lo, hi;
rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi);
invalue = (lo & (1<<22|1<<23|1<<24|1<<25)) >>22;
if (longhaul_version==TYPE_LONGHAUL_V2 || longhaul_version==TYPE_POWERSAVER) {
rdmsr(MSR_IA32_EBL_CR_POWERON, lo, hi);
invalue = (lo & (1<<22|1<<23|1<<24|1<<25))>>22;
if (longhaul_version == TYPE_LONGHAUL_V2 ||
longhaul_version == TYPE_POWERSAVER) {
if (lo & (1<<27))
invalue+=16;
invalue += 16;
}
return eblcr_table[invalue];
return eblcr[invalue];
}
/* For processor with BCR2 MSR */
static void do_longhaul1(unsigned int clock_ratio_index)
static void do_longhaul1(unsigned int mults_index)
{
union msr_bcr2 bcr2;
rdmsrl(MSR_VIA_BCR2, bcr2.val);
/* Enable software clock multiplier */
bcr2.bits.ESOFTBF = 1;
bcr2.bits.CLOCKMUL = clock_ratio_index & 0xff;
bcr2.bits.CLOCKMUL = mults_index & 0xff;
/* Sync to timer tick */
safe_halt();
@@ -161,7 +163,7 @@ static void do_longhaul1(unsigned int clock_ratio_index)
/* For processor with Longhaul MSR */
static void do_powersaver(int cx_address, unsigned int clock_ratio_index,
static void do_powersaver(int cx_address, unsigned int mults_index,
unsigned int dir)
{
union msr_longhaul longhaul;
@@ -173,11 +175,11 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index,
longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
else
longhaul.bits.RevisionKey = 0;
longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
longhaul.bits.SoftBusRatio = mults_index & 0xf;
longhaul.bits.SoftBusRatio4 = (mults_index & 0x10) >> 4;
/* Setup new voltage */
if (can_scale_voltage)
longhaul.bits.SoftVID = (clock_ratio_index >> 8) & 0x1f;
longhaul.bits.SoftVID = (mults_index >> 8) & 0x1f;
/* Sync to timer tick */
safe_halt();
/* Raise voltage if necessary */
@@ -240,14 +242,14 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index,
/**
* longhaul_set_cpu_frequency()
* @clock_ratio_index : bitpattern of the new multiplier.
* @mults_index : bitpattern of the new multiplier.
*
* Sets a new clock ratio.
*/
static void longhaul_setstate(unsigned int table_index)
{
unsigned int clock_ratio_index;
unsigned int mults_index;
int speed, mult;
struct cpufreq_freqs freqs;
unsigned long flags;
@@ -256,9 +258,9 @@ static void longhaul_setstate(unsigned int table_index)
u32 bm_timeout = 1000;
unsigned int dir = 0;
clock_ratio_index = longhaul_table[table_index].index;
mults_index = longhaul_table[table_index].index;
/* Safety precautions */
mult = clock_ratio[clock_ratio_index & 0x1f];
mult = mults[mults_index & 0x1f];
if (mult == -1)
return;
speed = calc_speed(mult);
@@ -274,7 +276,7 @@ static void longhaul_setstate(unsigned int table_index)
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
dprintk("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
retry_loop:
preempt_disable();
@@ -282,8 +284,8 @@ retry_loop:
pic2_mask = inb(0xA1);
pic1_mask = inb(0x21); /* works on C3. save mask. */
outb(0xFF,0xA1); /* Overkill */
outb(0xFE,0x21); /* TMR0 only */
outb(0xFF, 0xA1); /* Overkill */
outb(0xFE, 0x21); /* TMR0 only */
/* Wait while PCI bus is busy. */
if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE
@@ -303,7 +305,7 @@ retry_loop:
outb(3, 0x22);
} else if ((pr != NULL) && pr->flags.bm_control) {
/* Disable bus master arbitration */
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
}
switch (longhaul_version) {
@@ -312,7 +314,7 @@ retry_loop:
* Software controlled multipliers only.
*/
case TYPE_LONGHAUL_V1:
do_longhaul1(clock_ratio_index);
do_longhaul1(mults_index);
break;
/*
@@ -326,10 +328,10 @@ retry_loop:
case TYPE_POWERSAVER:
if (longhaul_flags & USE_ACPI_C3) {
/* Don't allow wakeup */
acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
do_powersaver(cx->address, clock_ratio_index, dir);
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
do_powersaver(cx->address, mults_index, dir);
} else {
do_powersaver(0, clock_ratio_index, dir);
do_powersaver(0, mults_index, dir);
}
break;
}
@@ -339,10 +341,10 @@ retry_loop:
outb(0, 0x22);
} else if ((pr != NULL) && pr->flags.bm_control) {
/* Enable bus master arbitration */
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
}
outb(pic2_mask,0xA1); /* restore mask */
outb(pic1_mask,0x21);
outb(pic2_mask, 0xA1); /* restore mask */
outb(pic1_mask, 0x21);
local_irq_restore(flags);
preempt_enable();
@@ -392,7 +394,8 @@ retry_loop:
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
if (!bm_timeout)
printk(KERN_INFO PFX "Warning: Timeout while waiting for idle PCI bus.\n");
printk(KERN_INFO PFX "Warning: Timeout while waiting for "
"idle PCI bus.\n");
}
/*
@@ -458,31 +461,32 @@ static int __init longhaul_get_ranges(void)
break;
}
dprintk ("MinMult:%d.%dx MaxMult:%d.%dx\n",
dprintk("MinMult:%d.%dx MaxMult:%d.%dx\n",
minmult/10, minmult%10, maxmult/10, maxmult%10);
highest_speed = calc_speed(maxmult);
lowest_speed = calc_speed(minmult);
dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb,
dprintk("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb,
print_speed(lowest_speed/1000),
print_speed(highest_speed/1000));
if (lowest_speed == highest_speed) {
printk (KERN_INFO PFX "highestspeed == lowest, aborting.\n");
printk(KERN_INFO PFX "highestspeed == lowest, aborting.\n");
return -EINVAL;
}
if (lowest_speed > highest_speed) {
printk (KERN_INFO PFX "nonsense! lowest (%d > %d) !\n",
printk(KERN_INFO PFX "nonsense! lowest (%d > %d) !\n",
lowest_speed, highest_speed);
return -EINVAL;
}
longhaul_table = kmalloc((numscales + 1) * sizeof(struct cpufreq_frequency_table), GFP_KERNEL);
if(!longhaul_table)
longhaul_table = kmalloc((numscales + 1) * sizeof(*longhaul_table),
GFP_KERNEL);
if (!longhaul_table)
return -ENOMEM;
for (j = 0; j < numscales; j++) {
ratio = clock_ratio[j];
ratio = mults[j];
if (ratio == -1)
continue;
if (ratio > maxmult || ratio < minmult)
@@ -507,13 +511,10 @@ static int __init longhaul_get_ranges(void)
}
}
if (min_i != j) {
unsigned int temp;
temp = longhaul_table[j].frequency;
longhaul_table[j].frequency = longhaul_table[min_i].frequency;
longhaul_table[min_i].frequency = temp;
temp = longhaul_table[j].index;
longhaul_table[j].index = longhaul_table[min_i].index;
longhaul_table[min_i].index = temp;
swap(longhaul_table[j].frequency,
longhaul_table[min_i].frequency);
swap(longhaul_table[j].index,
longhaul_table[min_i].index);
}
}
@@ -521,7 +522,7 @@ static int __init longhaul_get_ranges(void)
/* Find index we are running on */
for (j = 0; j < k; j++) {
if (clock_ratio[longhaul_table[j].index & 0x1f] == mult) {
if (mults[longhaul_table[j].index & 0x1f] == mult) {
longhaul_index = j;
break;
}
@@ -559,20 +560,22 @@ static void __init longhaul_setup_voltagescaling(void)
maxvid = vrm_mV_table[longhaul.bits.MaximumVID];
if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) {
printk (KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. "
printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. "
"Voltage scaling disabled.\n",
minvid.mV/1000, minvid.mV%1000, maxvid.mV/1000, maxvid.mV%1000);
minvid.mV/1000, minvid.mV%1000,
maxvid.mV/1000, maxvid.mV%1000);
return;
}
if (minvid.mV == maxvid.mV) {
printk (KERN_INFO PFX "Claims to support voltage scaling but min & max are "
"both %d.%03d. Voltage scaling disabled\n",
printk(KERN_INFO PFX "Claims to support voltage scaling but "
"min & max are both %d.%03d. "
"Voltage scaling disabled\n",
maxvid.mV/1000, maxvid.mV%1000);
return;
}
/* How many voltage steps */
/* How many voltage steps */
numvscales = maxvid.pos - minvid.pos + 1;
printk(KERN_INFO PFX
"Max VID=%d.%03d "
@@ -586,7 +589,7 @@ static void __init longhaul_setup_voltagescaling(void)
j = longhaul.bits.MinMHzBR;
if (longhaul.bits.MinMHzBR4)
j += 16;
min_vid_speed = eblcr_table[j];
min_vid_speed = eblcr[j];
if (min_vid_speed == -1)
return;
switch (longhaul.bits.MinMHzFSB) {
@@ -617,7 +620,8 @@ static void __init longhaul_setup_voltagescaling(void)
pos = minvid.pos;
longhaul_table[j].index |= mV_vrm_table[pos] << 8;
vid = vrm_mV_table[mV_vrm_table[pos]];
printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", speed, j, vid.mV);
printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n",
speed, j, vid.mV);
j++;
}
@@ -640,7 +644,8 @@ static int longhaul_target(struct cpufreq_policy *policy,
unsigned int dir = 0;
u8 vid, current_vid;
if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, relation, &table_index))
if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq,
relation, &table_index))
return -EINVAL;
/* Don't set same frequency again */
@@ -656,7 +661,8 @@ static int longhaul_target(struct cpufreq_policy *policy,
* this in hardware, C3 is old and we need to do this
* in software. */
i = longhaul_index;
current_vid = (longhaul_table[longhaul_index].index >> 8) & 0x1f;
current_vid = (longhaul_table[longhaul_index].index >> 8);
current_vid &= 0x1f;
if (table_index > longhaul_index)
dir = 1;
while (i != table_index) {
@@ -691,9 +697,9 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
{
struct acpi_device *d;
if ( acpi_bus_get_device(obj_handle, &d) ) {
if (acpi_bus_get_device(obj_handle, &d))
return 0;
}
*return_value = acpi_driver_data(d);
return 1;
}
@@ -750,7 +756,7 @@ static int longhaul_setup_southbridge(void)
/* Find VT8235 southbridge */
dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
if (dev == NULL)
/* Find VT8237 southbridge */
/* Find VT8237 southbridge */
dev = pci_get_device(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_8237, NULL);
if (dev != NULL) {
@@ -769,7 +775,8 @@ static int longhaul_setup_southbridge(void)
if (pci_cmd & 1 << 7) {
pci_read_config_dword(dev, 0x88, &acpi_regs_addr);
acpi_regs_addr &= 0xff00;
printk(KERN_INFO PFX "ACPI I/O at 0x%x\n", acpi_regs_addr);
printk(KERN_INFO PFX "ACPI I/O at 0x%x\n",
acpi_regs_addr);
}
pci_dev_put(dev);
@@ -781,7 +788,7 @@ static int longhaul_setup_southbridge(void)
static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data(0);
char *cpuname=NULL;
char *cpuname = NULL;
int ret;
u32 lo, hi;
@@ -791,8 +798,8 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
cpu_model = CPU_SAMUEL;
cpuname = "C3 'Samuel' [C5A]";
longhaul_version = TYPE_LONGHAUL_V1;
memcpy (clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio));
memcpy (eblcr_table, samuel1_eblcr, sizeof(samuel1_eblcr));
memcpy(mults, samuel1_mults, sizeof(samuel1_mults));
memcpy(eblcr, samuel1_eblcr, sizeof(samuel1_eblcr));
break;
case 7:
@@ -803,10 +810,8 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
cpuname = "C3 'Samuel 2' [C5B]";
/* Note, this is not a typo, early Samuel2's had
* Samuel1 ratios. */
memcpy(clock_ratio, samuel1_clock_ratio,
sizeof(samuel1_clock_ratio));
memcpy(eblcr_table, samuel2_eblcr,
sizeof(samuel2_eblcr));
memcpy(mults, samuel1_mults, sizeof(samuel1_mults));
memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr));
break;
case 1 ... 15:
longhaul_version = TYPE_LONGHAUL_V1;
@@ -817,10 +822,8 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
cpu_model = CPU_EZRA;
cpuname = "C3 'Ezra' [C5C]";
}
memcpy(clock_ratio, ezra_clock_ratio,
sizeof(ezra_clock_ratio));
memcpy(eblcr_table, ezra_eblcr,
sizeof(ezra_eblcr));
memcpy(mults, ezra_mults, sizeof(ezra_mults));
memcpy(eblcr, ezra_eblcr, sizeof(ezra_eblcr));
break;
}
break;
@@ -829,18 +832,16 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
cpu_model = CPU_EZRA_T;
cpuname = "C3 'Ezra-T' [C5M]";
longhaul_version = TYPE_POWERSAVER;
numscales=32;
memcpy (clock_ratio, ezrat_clock_ratio, sizeof(ezrat_clock_ratio));
memcpy (eblcr_table, ezrat_eblcr, sizeof(ezrat_eblcr));
numscales = 32;
memcpy(mults, ezrat_mults, sizeof(ezrat_mults));
memcpy(eblcr, ezrat_eblcr, sizeof(ezrat_eblcr));
break;
case 9:
longhaul_version = TYPE_POWERSAVER;
numscales = 32;
memcpy(clock_ratio,
nehemiah_clock_ratio,
sizeof(nehemiah_clock_ratio));
memcpy(eblcr_table, nehemiah_eblcr, sizeof(nehemiah_eblcr));
memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
switch (c->x86_mask) {
case 0 ... 1:
cpu_model = CPU_NEHEMIAH;
@@ -869,14 +870,14 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
longhaul_version = TYPE_LONGHAUL_V1;
}
printk (KERN_INFO PFX "VIA %s CPU detected. ", cpuname);
printk(KERN_INFO PFX "VIA %s CPU detected. ", cpuname);
switch (longhaul_version) {
case TYPE_LONGHAUL_V1:
case TYPE_LONGHAUL_V2:
printk ("Longhaul v%d supported.\n", longhaul_version);
printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version);
break;
case TYPE_POWERSAVER:
printk ("Powersaver supported.\n");
printk(KERN_CONT "Powersaver supported.\n");
break;
};
@@ -940,7 +941,7 @@ static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
static struct freq_attr* longhaul_attr[] = {
static struct freq_attr *longhaul_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
@@ -966,13 +967,15 @@ static int __init longhaul_init(void)
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n");
printk(KERN_ERR PFX "More than 1 CPU detected, "
"longhaul disabled.\n");
return -ENODEV;
}
#endif
#ifdef CONFIG_X86_IO_APIC
if (cpu_has_apic) {
printk(KERN_ERR PFX "APIC detected. Longhaul is currently broken in this configuration.\n");
printk(KERN_ERR PFX "APIC detected. Longhaul is currently "
"broken in this configuration.\n");
return -ENODEV;
}
#endif
@@ -993,8 +996,8 @@ static void __exit longhaul_exit(void)
{
int i;
for (i=0; i < numscales; i++) {
if (clock_ratio[i] == maxmult) {
for (i = 0; i < numscales; i++) {
if (mults[i] == maxmult) {
longhaul_setstate(i);
break;
}
@@ -1007,11 +1010,11 @@ static void __exit longhaul_exit(void)
/* Even if the BIOS exports an ACPI C3 state, and it is used
 * successfully when the CPU is idle, this state doesn't
 * trigger a frequency transition in some cases. */
module_param (disable_acpi_c3, int, 0644);
module_param(disable_acpi_c3, int, 0644);
MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
/* Change CPU voltage with frequency. Very useful to save
 * power, but most VIA C3 processors don't support it. */
module_param (scale_voltage, int, 0644);
module_param(scale_voltage, int, 0644);
MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
/* Force revision key to 0 for processors which don't
 * support voltage scaling, but introduce themselves as
@@ -1019,9 +1022,9 @@ MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
module_param(revid_errata, int, 0644);
MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
MODULE_AUTHOR ("Dave Jones <davej@redhat.com>");
MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
MODULE_LICENSE ("GPL");
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
MODULE_LICENSE("GPL");
late_initcall(longhaul_init);
module_exit(longhaul_exit);
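longhaul_get_cpu_mult() pulls a 4-bit index out of bits 22-25 of the EBL_CR_POWERON MSR, adds 16 when bit 27 is set on Longhaul v2/PowerSaver parts, and maps the result through the eblcr table. A standalone sketch of that decode; the table is abridged and the register value invented:

#include <stdio.h>
#include <stdint.h>

/* Abridged eblcr-style table: index -> multiplier x10, -1 = reserved.
 * GCC range designators fill the entries this sketch leaves out. */
static const int eblcr[32] = {
	[0] = -1, [1] = 30, [2] = 40, [3] = 100,
	[4] = 55, [5] = 35, [6] = 45, [7] = 95,
	[8 ... 31] = -1,
};

static int get_cpu_mult(uint32_t lo, int is_powersaver)
{
	unsigned int idx = (lo & (1 << 22 | 1 << 23 | 1 << 24 | 1 << 25)) >> 22;

	if (is_powersaver && (lo & (1 << 27)))
		idx += 16;	/* fifth ratio bit on newer cores */
	return eblcr[idx];
}

int main(void)
{
	uint32_t lo = 3u << 22;	/* invented MSR low word */
	int mult = get_cpu_mult(lo, 0);

	printf("multiplier = %d.%dx\n", mult / 10, mult % 10);
	return 0;
}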

View file

@@ -49,14 +49,14 @@ union msr_longhaul {
/*
* Clock ratio tables. Div/Mod by 10 to get ratio.
* The eblcr ones specify the ratio read from the CPU.
* The clock_ratio ones specify what to write to the CPU.
* The eblcr values specify the ratio read from the CPU.
* The mults values specify what to write to the CPU.
*/
/*
* VIA C3 Samuel 1 & Samuel 2 (stepping 0)
*/
static const int __initdata samuel1_clock_ratio[16] = {
static const int __initdata samuel1_mults[16] = {
-1, /* 0000 -> RESERVED */
30, /* 0001 -> 3.0x */
40, /* 0010 -> 4.0x */
@@ -119,7 +119,7 @@ static const int __initdata samuel2_eblcr[16] = {
/*
* VIA C3 Ezra
*/
static const int __initdata ezra_clock_ratio[16] = {
static const int __initdata ezra_mults[16] = {
100, /* 0000 -> 10.0x */
30, /* 0001 -> 3.0x */
40, /* 0010 -> 4.0x */
@@ -160,7 +160,7 @@ static const int __initdata ezra_eblcr[16] = {
/*
* VIA C3 (Ezra-T) [C5M].
*/
static const int __initdata ezrat_clock_ratio[32] = {
static const int __initdata ezrat_mults[32] = {
100, /* 0000 -> 10.0x */
30, /* 0001 -> 3.0x */
40, /* 0010 -> 4.0x */
@@ -235,7 +235,7 @@ static const int __initdata ezrat_eblcr[32] = {
/*
* VIA C3 Nehemiah */
static const int __initdata nehemiah_clock_ratio[32] = {
static const int __initdata nehemiah_mults[32] = {
100, /* 0000 -> 10.0x */
-1, /* 0001 -> 16.0x */
40, /* 0010 -> 4.0x */

View file

@@ -11,12 +11,13 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/timex.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/timex.h>
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longrun", msg)
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
"longrun", msg)
static struct cpufreq_driver longrun_driver;
@@ -51,7 +52,7 @@ static void __init longrun_get_policy(struct cpufreq_policy *policy)
msr_lo &= 0x0000007F;
msr_hi &= 0x0000007F;
if ( longrun_high_freq <= longrun_low_freq ) {
if (longrun_high_freq <= longrun_low_freq) {
/* Assume degenerate Longrun table */
policy->min = policy->max = longrun_high_freq;
} else {
@@ -79,7 +80,7 @@ static int longrun_set_policy(struct cpufreq_policy *policy)
if (!policy)
return -EINVAL;
if ( longrun_high_freq <= longrun_low_freq ) {
if (longrun_high_freq <= longrun_low_freq) {
/* Assume degenerate Longrun table */
pctg_lo = pctg_hi = 100;
} else {
@@ -152,7 +153,7 @@ static unsigned int longrun_get(unsigned int cpu)
cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
dprintk("cpuid eax is %u\n", eax);
return (eax * 1000);
return eax * 1000;
}
/**
@@ -196,7 +197,8 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
*high_freq = msr_lo * 1000; /* to kHz */
dprintk("longrun table interface told %u - %u kHz\n", *low_freq, *high_freq);
dprintk("longrun table interface told %u - %u kHz\n",
*low_freq, *high_freq);
if (*low_freq > *high_freq)
*low_freq = *high_freq;
@@ -219,7 +221,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
/* try decreasing in 10% steps, some processors react only
* on some barrier values */
for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -=10) {
for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -= 10) {
/* set to 0 to try_hi perf_pctg */
msr_lo &= 0xFFFFFF80;
msr_hi &= 0xFFFFFF80;
@@ -236,7 +238,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
/* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
* equals
* low_freq * ( 1 - perf_pctg) = (cur_freq - high_freq * perf_pctg)
* low_freq * (1 - perf_pctg) = (cur_freq - high_freq * perf_pctg)
*
* high_freq * perf_pctg is stored temporarily into "ebx".
*/
@@ -317,9 +319,10 @@ static void __exit longrun_exit(void)
}
MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION ("LongRun driver for Transmeta Crusoe and Efficeon processors.");
MODULE_LICENSE ("GPL");
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("LongRun driver for Transmeta Crusoe and "
"Efficeon processors.");
MODULE_LICENSE("GPL");
module_init(longrun_init);
module_exit(longrun_exit);
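The comment fixed above encodes performance_pctg = (cur_freq - low_freq) / (high_freq - low_freq), and longrun_determine_freqs() rearranges it into low_freq * (1 - perf_pctg) = cur_freq - high_freq * perf_pctg to recover low_freq. A numeric check of that rearrangement in integer percent, with invented frequencies:

#include <stdio.h>

int main(void)
{
	/* Invented: 600 MHz part currently at 500 MHz with the
	 * performance window set to 80%. */
	unsigned int high_freq = 600000;	/* kHz */
	unsigned int cur_freq = 500000;		/* kHz */
	unsigned int perf_pctg = 80;		/* percent */

	/* low * (100 - p) = cur * 100 - high * p, with p in percent */
	unsigned int low_freq =
		(cur_freq * 100 - high_freq * perf_pctg) / (100 - perf_pctg);

	printf("derived low_freq = %u kHz\n", low_freq);	/* 100000 */
	/* round trip: (cur - low) / (high - low) should give p back */
	printf("check: %u%%\n",
	       (cur_freq - low_freq) * 100 / (high_freq - low_freq));
	return 0;
}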

View file

@@ -27,15 +27,17 @@
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/timex.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/timex.h>
#include <asm/timer.h>
#include "speedstep-lib.h"
#define PFX "p4-clockmod: "
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "p4-clockmod", msg)
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
"p4-clockmod", msg)
/*
* Duty Cycle (3 bits); note DC_DISABLE is not specified in
@@ -58,7 +60,8 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
{
u32 l, h;
if (!cpu_online(cpu) || (newstate > DC_DISABLE) || (newstate == DC_RESV))
if (!cpu_online(cpu) ||
(newstate > DC_DISABLE) || (newstate == DC_RESV))
return -EINVAL;
rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
@@ -66,7 +69,8 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
if (l & 0x01)
dprintk("CPU#%d currently thermal throttled\n", cpu);
if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT))
if (has_N44_O17_errata[cpu] &&
(newstate == DC_25PT || newstate == DC_DFLT))
newstate = DC_38PT;
rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
@@ -112,7 +116,8 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
struct cpufreq_freqs freqs;
int i;
if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], target_freq, relation, &newstate))
if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0],
target_freq, relation, &newstate))
return -EINVAL;
freqs.old = cpufreq_p4_get(policy->cpu);
@@ -127,7 +132,8 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
/* run on each logical CPU,
* see section 13.15.3 of IA32 Intel Architecture Software
* Developer's Manual, Volume 3
*/
for_each_cpu(i, policy->cpus)
@@ -153,28 +159,30 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
if (c->x86 == 0x06) {
if (cpu_has(c, X86_FEATURE_EST))
printk(KERN_WARNING PFX "Warning: EST-capable CPU detected. "
"The acpi-cpufreq module offers voltage scaling"
" in addition of frequency scaling. You should use "
"that instead of p4-clockmod, if possible.\n");
printk(KERN_WARNING PFX "Warning: EST-capable CPU "
"detected. The acpi-cpufreq module offers "
"voltage scaling in addition of frequency "
"scaling. You should use that instead of "
"p4-clockmod, if possible.\n");
switch (c->x86_model) {
case 0x0E: /* Core */
case 0x0F: /* Core Duo */
case 0x16: /* Celeron Core */
p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE);
return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
case 0x0D: /* Pentium M (Dothan) */
p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
/* fall through */
case 0x09: /* Pentium M (Banias) */
return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
}
}
if (c->x86 != 0xF) {
if (!cpu_has(c, X86_FEATURE_EST))
printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. "
"Please send an e-mail to <cpufreq@vger.kernel.org>\n");
printk(KERN_WARNING PFX "Unknown CPU. "
"Please send an e-mail to "
"<cpufreq@vger.kernel.org>\n");
return 0;
}
@@ -182,16 +190,16 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
* throttling is active or not. */
p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
if (speedstep_detect_processor() == SPEEDSTEP_PROCESSOR_P4M) {
if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
"The speedstep-ich or acpi cpufreq modules offer "
"voltage scaling in addition of frequency scaling. "
"You should use either one instead of p4-clockmod, "
"if possible.\n");
return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4M);
return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
}
return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4D);
return speedstep_get_frequency(SPEEDSTEP_CPU_P4D);
}
@@ -203,7 +211,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
unsigned int i;
#ifdef CONFIG_SMP
cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif
/* Errata workaround */
@@ -217,14 +225,20 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
dprintk("has errata -- disabling low frequencies\n");
}
if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D &&
c->x86_model < 2) {
/* switch to maximum frequency and measure result */
cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
recalibrate_cpu_khz();
}
/* get max frequency */
stock_freq = cpufreq_p4_get_frequency(c);
if (!stock_freq)
return -EINVAL;
/* table init */
for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
if ((i<2) && (has_N44_O17_errata[policy->cpu]))
for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
else
p4clockmod_table[i].frequency = (stock_freq * i)/8;
@@ -232,7 +246,10 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 1000000; /* assumed */
/* the transition latency is set to be 1 higher than the maximum
* transition latency of the ondemand governor */
policy->cpuinfo.transition_latency = 10000001;
policy->cur = stock_freq;
return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
@@ -258,12 +275,12 @@ static unsigned int cpufreq_p4_get(unsigned int cpu)
l = DC_DISABLE;
if (l != DC_DISABLE)
return (stock_freq * l / 8);
return stock_freq * l / 8;
return stock_freq;
}
static struct freq_attr* p4clockmod_attr[] = {
static struct freq_attr *p4clockmod_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
@@ -298,9 +315,10 @@ static int __init cpufreq_p4_init(void)
ret = cpufreq_register_driver(&p4clockmod_driver);
if (!ret)
printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");
printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock "
"Modulation available\n");
return (ret);
return ret;
}
@@ -310,9 +328,9 @@ static void __exit cpufreq_p4_exit(void)
}
MODULE_AUTHOR ("Zwane Mwaikambo <zwane@commfireservices.com>");
MODULE_DESCRIPTION ("cpufreq driver for Pentium(TM) 4/Xeon(TM)");
MODULE_LICENSE ("GPL");
MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>");
MODULE_DESCRIPTION("cpufreq driver for Pentium(TM) 4/Xeon(TM)");
MODULE_LICENSE("GPL");
late_initcall(cpufreq_p4_init);
module_exit(cpufreq_p4_exit);
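p4-clockmod throttles by duty cycle in eighths: table entry i runs at stock_freq * i / 8, and on CPUs with the N44/O17 errata the two lowest steps are invalidated, exactly as in the table-init loop above. A standalone sketch with an invented stock frequency:

#include <stdio.h>

#define ENTRY_INVALID 0u	/* stand-in for CPUFREQ_ENTRY_INVALID */

int main(void)
{
	unsigned int stock_freq = 2800000;	/* kHz, invented P4 */
	unsigned int table[8];
	int has_n44_o17_errata = 1;		/* pretend the CPU is affected */
	int i;

	/* Step i runs at i/8 of stock speed; the errata forbids the
	 * two lowest duty cycles. */
	for (i = 1; i < 8; i++) {
		if (i < 2 && has_n44_o17_errata)
			table[i] = ENTRY_INVALID;
		else
			table[i] = (stock_freq * i) / 8;
	}
	for (i = 1; i < 8; i++)
		printf("DC %d/8 -> %u kHz\n", i, table[i]);
	return 0;
}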

View file

@@ -1,6 +1,7 @@
/*
* This file was based upon code in Powertweak Linux (http://powertweak.sf.net)
* (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, Dominik Brodowski.
* (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä,
* Dominik Brodowski.
*
* Licensed under the terms of the GNU GPL License version 2.
*
@@ -13,14 +14,15 @@
#include <linux/cpufreq.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <asm/msr.h>
#include <linux/timex.h>
#include <linux/io.h>
#include <asm/msr.h>
#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long
as it is unused */
#define PFX "powernow-k6: "
static unsigned int busfreq; /* FSB, in 10 kHz */
static unsigned int max_multiplier;
@@ -47,8 +49,8 @@ static struct cpufreq_frequency_table clock_ratio[] = {
*/
static int powernow_k6_get_cpu_multiplier(void)
{
u64 invalue = 0;
u32 msrval;
u64 invalue = 0;
u32 msrval;
msrval = POWERNOW_IOPORT + 0x1;
wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
@@ -68,12 +70,12 @@ static int powernow_k6_get_cpu_multiplier(void)
*/
static void powernow_k6_set_state(unsigned int best_i)
{
unsigned long outvalue = 0, invalue = 0;
unsigned long msrval;
struct cpufreq_freqs freqs;
unsigned long outvalue = 0, invalue = 0;
unsigned long msrval;
struct cpufreq_freqs freqs;
if (clock_ratio[best_i].index > max_multiplier) {
printk(KERN_ERR "cpufreq: invalid target frequency\n");
printk(KERN_ERR PFX "invalid target frequency\n");
return;
}
@@ -119,7 +121,8 @@ static int powernow_k6_verify(struct cpufreq_policy *policy)
* powernow_k6_setpolicy - sets a new CPUFreq policy
* @policy: new policy
* @target_freq: the target frequency
* @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
* @relation: how that frequency relates to achieved frequency
* (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
*
* sets a new CPUFreq policy
*/
@@ -127,9 +130,10 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int newstate = 0;
unsigned int newstate = 0;
if (cpufreq_frequency_table_target(policy, &clock_ratio[0], target_freq, relation, &newstate))
if (cpufreq_frequency_table_target(policy, &clock_ratio[0],
target_freq, relation, &newstate))
return -EINVAL;
powernow_k6_set_state(newstate);
@@ -140,7 +144,7 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i;
unsigned int i, f;
int result;
if (policy->cpu != 0)
@@ -152,10 +156,11 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
/* table init */
for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
if (clock_ratio[i].index > max_multiplier)
f = clock_ratio[i].index;
if (f > max_multiplier)
clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID;
else
clock_ratio[i].frequency = busfreq * clock_ratio[i].index;
clock_ratio[i].frequency = busfreq * f;
}
/* cpuinfo and default policy values */
@@ -185,7 +190,9 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
static unsigned int powernow_k6_get(unsigned int cpu)
{
return busfreq * powernow_k6_get_cpu_multiplier();
unsigned int ret;
ret = (busfreq * powernow_k6_get_cpu_multiplier());
return ret;
}
static struct freq_attr *powernow_k6_attr[] = {
@@ -221,7 +228,7 @@ static int __init powernow_k6_init(void)
return -ENODEV;
if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) {
printk("cpufreq: PowerNow IOPORT region already used.\n");
printk(KERN_INFO PFX "PowerNow IOPORT region already used.\n");
return -EIO;
}
@@ -246,7 +253,8 @@ static void __exit powernow_k6_exit(void)
}
MODULE_AUTHOR("Arjan van de Ven, Dave Jones <davej@redhat.com>, Dominik Brodowski <linux@brodo.de>");
MODULE_AUTHOR("Arjan van de Ven, Dave Jones <davej@redhat.com>, "
"Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
MODULE_LICENSE("GPL");
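The table-init change caches clock_ratio[i].index in f, and the math leans on the driver's unit conventions: busfreq holds the FSB in 10 kHz units and the index is the multiplier times ten, so busfreq * f lands directly in kHz (10000 * 45 = 450000 kHz, i.e. 4.5x on a 100 MHz bus). A small sketch of that arithmetic; the multiplier list is assumed, not the driver's table:

#include <stdio.h>

int main(void)
{
	/* FSB in 10 kHz units: 10000 means a 100 MHz bus. */
	unsigned int busfreq = 10000;
	/* Assumed multipliers, stored times ten as the driver does. */
	unsigned int mults_x10[] = { 20, 30, 35, 40, 45, 50, 55, 60 };
	unsigned int i;

	for (i = 0; i < sizeof(mults_x10) / sizeof(mults_x10[0]); i++)
		printf("%u.%ux -> %u kHz\n", mults_x10[i] / 10,
		       mults_x10[i] % 10, busfreq * mults_x10[i]);
	return 0;
}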

View file

@@ -6,10 +6,12 @@
* Licensed under the terms of the GNU GPL License version 2.
* Based upon datasheets & sample CPUs kindly provided by AMD.
*
* Errata 5: Processor may fail to execute a FID/VID change in presence of interrupt.
* - We cli/sti on stepping A0 CPUs around the FID/VID transition.
* Errata 15: Processors with half frequency multipliers may hang upon wakeup from disconnect.
* - We disable half multipliers if ACPI is used on A0 stepping CPUs.
* Errata 5:
* CPU may fail to execute a FID/VID change in presence of interrupt.
* - We cli/sti on stepping A0 CPUs around the FID/VID transition.
* Errata 15:
* CPU with half frequency multipliers may hang upon wakeup from disconnect.
* - We disable half multipliers if ACPI is used on A0 stepping CPUs.
*/
#include <linux/kernel.h>
@@ -20,11 +22,11 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dmi.h>
#include <linux/timex.h>
#include <linux/io.h>
#include <asm/timer.h> /* Needed for recalibrate_cpu_khz() */
#include <asm/msr.h>
#include <asm/timer.h>
#include <asm/timex.h>
#include <asm/io.h>
#include <asm/system.h>
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
@@ -58,9 +60,9 @@ struct pst_s {
union powernow_acpi_control_t {
struct {
unsigned long fid:5,
vid:5,
sgtc:20,
res1:2;
vid:5,
sgtc:20,
res1:2;
} bits;
unsigned long val;
};
@@ -94,14 +96,15 @@ static struct cpufreq_frequency_table *powernow_table;
static unsigned int can_scale_bus;
static unsigned int can_scale_vid;
static unsigned int minimum_speed=-1;
static unsigned int minimum_speed = -1;
static unsigned int maximum_speed;
static unsigned int number_scales;
static unsigned int fsb;
static unsigned int latency;
static char have_a0;
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k7", msg)
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
"powernow-k7", msg)
static int check_fsb(unsigned int fsbspeed)
{
@@ -109,7 +112,7 @@ static int check_fsb(unsigned int fsbspeed)
unsigned int f = fsb / 1000;
delta = (fsbspeed > f) ? fsbspeed - f : f - fsbspeed;
return (delta < 5);
return delta < 5;
}
static int check_powernow(void)
@@ -117,24 +120,26 @@ static int check_powernow(void)
struct cpuinfo_x86 *c = &cpu_data(0);
unsigned int maxei, eax, ebx, ecx, edx;
if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 !=6)) {
if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 6)) {
#ifdef MODULE
printk (KERN_INFO PFX "This module only works with AMD K7 CPUs\n");
printk(KERN_INFO PFX "This module only works with "
"AMD K7 CPUs\n");
#endif
return 0;
}
/* Get maximum capabilities */
maxei = cpuid_eax (0x80000000);
maxei = cpuid_eax(0x80000000);
if (maxei < 0x80000007) { /* Any powernow info ? */
#ifdef MODULE
printk (KERN_INFO PFX "No powernow capabilities detected\n");
printk(KERN_INFO PFX "No powernow capabilities detected\n");
#endif
return 0;
}
if ((c->x86_model == 6) && (c->x86_mask == 0)) {
printk (KERN_INFO PFX "K7 660[A0] core detected, enabling errata workarounds\n");
printk(KERN_INFO PFX "K7 660[A0] core detected, "
"enabling errata workarounds\n");
have_a0 = 1;
}
@@ -144,37 +149,42 @@ static int check_powernow(void)
if (!(edx & (1 << 1 | 1 << 2)))
return 0;
printk (KERN_INFO PFX "PowerNOW! Technology present. Can scale: ");
printk(KERN_INFO PFX "PowerNOW! Technology present. Can scale: ");
if (edx & 1 << 1) {
printk ("frequency");
can_scale_bus=1;
printk("frequency");
can_scale_bus = 1;
}
if ((edx & (1 << 1 | 1 << 2)) == 0x6)
printk (" and ");
printk(" and ");
if (edx & 1 << 2) {
printk ("voltage");
can_scale_vid=1;
printk("voltage");
can_scale_vid = 1;
}
printk (".\n");
printk(".\n");
return 1;
}
static void invalidate_entry(unsigned int entry)
{
powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
}
static int get_ranges (unsigned char *pst)
static int get_ranges(unsigned char *pst)
{
unsigned int j;
unsigned int speed;
u8 fid, vid;
powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL);
powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) *
(number_scales + 1)), GFP_KERNEL);
if (!powernow_table)
return -ENOMEM;
for (j=0 ; j < number_scales; j++) {
for (j = 0 ; j < number_scales; j++) {
fid = *pst++;
powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10;
@@ -182,10 +192,10 @@ static int get_ranges (unsigned char *pst)
speed = powernow_table[j].frequency;
if ((fid_codes[fid] % 10)==5) {
if ((fid_codes[fid] % 10) == 5) {
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
if (have_a0 == 1)
powernow_table[j].frequency = CPUFREQ_ENTRY_INVALID;
invalidate_entry(j);
#endif
}
@@ -197,7 +207,7 @@ static int get_ranges (unsigned char *pst)
vid = *pst++;
powernow_table[j].index |= (vid << 8); /* upper 8 bits */
dprintk (" FID: 0x%x (%d.%dx [%dMHz]) "
dprintk(" FID: 0x%x (%d.%dx [%dMHz]) "
"VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
fid_codes[fid] % 10, speed/1000, vid,
mobile_vid_table[vid]/1000,
@@ -214,13 +224,13 @@ static void change_FID(int fid)
{
union msr_fidvidctl fidvidctl;
rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.FID != fid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.FID = fid;
fidvidctl.bits.VIDC = 0;
fidvidctl.bits.FIDC = 1;
wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
}
}
@@ -229,18 +239,18 @@ static void change_VID(int vid)
{
union msr_fidvidctl fidvidctl;
rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.VID != vid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.VID = vid;
fidvidctl.bits.FIDC = 0;
fidvidctl.bits.VIDC = 1;
wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
}
}
static void change_speed (unsigned int index)
static void change_speed(unsigned int index)
{
u8 fid, vid;
struct cpufreq_freqs freqs;
@@ -257,7 +267,7 @@ static void change_speed (unsigned int index)
freqs.cpu = 0;
rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val);
rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
freqs.old = fsb * fid_codes[cfid] / 10;
@@ -321,12 +331,14 @@ static int powernow_acpi_init(void)
goto err1;
}
if (acpi_processor_perf->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) {
if (acpi_processor_perf->control_register.space_id !=
ACPI_ADR_SPACE_FIXED_HARDWARE) {
retval = -ENODEV;
goto err2;
}
if (acpi_processor_perf->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) {
if (acpi_processor_perf->status_register.space_id !=
ACPI_ADR_SPACE_FIXED_HARDWARE) {
retval = -ENODEV;
goto err2;
}
@@ -338,7 +350,8 @@ static int powernow_acpi_init(void)
goto err2;
}
powernow_table = kzalloc((number_scales + 1) * (sizeof(struct cpufreq_frequency_table)), GFP_KERNEL);
powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) *
(number_scales + 1)), GFP_KERNEL);
if (!powernow_table) {
retval = -ENOMEM;
goto err2;
@@ -352,7 +365,7 @@ static int powernow_acpi_init(void)
unsigned int speed, speed_mhz;
pc.val = (unsigned long) state->control;
dprintk ("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n",
dprintk("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n",
i,
(u32) state->core_frequency,
(u32) state->power,
@@ -381,12 +394,12 @@ static int powernow_acpi_init(void)
if (speed % 1000 > 0)
speed_mhz++;
if ((fid_codes[fid] % 10)==5) {
if ((fid_codes[fid] % 10) == 5) {
if (have_a0 == 1)
powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
invalidate_entry(i);
}
dprintk (" FID: 0x%x (%d.%dx [%dMHz]) "
dprintk(" FID: 0x%x (%d.%dx [%dMHz]) "
"VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
fid_codes[fid] % 10, speed_mhz, vid,
mobile_vid_table[vid]/1000,
@@ -422,7 +435,8 @@ err1:
err05:
kfree(acpi_processor_perf);
err0:
printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n");
printk(KERN_WARNING PFX "ACPI perflib can not be used on "
"this platform\n");
acpi_processor_perf = NULL;
return retval;
}
@@ -435,7 +449,14 @@ static int powernow_acpi_init(void)
}
#endif
static int powernow_decode_bios (int maxfid, int startvid)
static void print_pst_entry(struct pst_s *pst, unsigned int j)
{
dprintk("PST:%d (@%p)\n", j, pst);
dprintk(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n",
pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid);
}
static int powernow_decode_bios(int maxfid, int startvid)
{
struct psb_s *psb;
struct pst_s *pst;
@@ -446,61 +467,67 @@ static int powernow_decode_bios (int maxfid, int startvid)
etuple = cpuid_eax(0x80000001);
for (i=0xC0000; i < 0xffff0 ; i+=16) {
for (i = 0xC0000; i < 0xffff0 ; i += 16) {
p = phys_to_virt(i);
if (memcmp(p, "AMDK7PNOW!", 10) == 0){
dprintk ("Found PSB header at %p\n", p);
if (memcmp(p, "AMDK7PNOW!", 10) == 0) {
dprintk("Found PSB header at %p\n", p);
psb = (struct psb_s *) p;
dprintk ("Table version: 0x%x\n", psb->tableversion);
dprintk("Table version: 0x%x\n", psb->tableversion);
if (psb->tableversion != 0x12) {
printk (KERN_INFO PFX "Sorry, only v1.2 tables supported right now\n");
printk(KERN_INFO PFX "Sorry, only v1.2 tables"
" supported right now\n");
return -ENODEV;
}
dprintk ("Flags: 0x%x\n", psb->flags);
if ((psb->flags & 1)==0) {
dprintk ("Mobile voltage regulator\n");
} else {
dprintk ("Desktop voltage regulator\n");
}
dprintk("Flags: 0x%x\n", psb->flags);
if ((psb->flags & 1) == 0)
dprintk("Mobile voltage regulator\n");
else
dprintk("Desktop voltage regulator\n");
latency = psb->settlingtime;
if (latency < 100) {
printk(KERN_INFO PFX "BIOS set settling time to %d microseconds. "
"Should be at least 100. Correcting.\n", latency);
printk(KERN_INFO PFX "BIOS set settling time "
"to %d microseconds. "
"Should be at least 100. "
"Correcting.\n", latency);
latency = 100;
}
dprintk ("Settling Time: %d microseconds.\n", psb->settlingtime);
dprintk ("Has %d PST tables. (Only dumping ones relevant to this CPU).\n", psb->numpst);
dprintk("Settling Time: %d microseconds.\n",
psb->settlingtime);
dprintk("Has %d PST tables. (Only dumping ones "
"relevant to this CPU).\n",
psb->numpst);
p += sizeof (struct psb_s);
p += sizeof(struct psb_s);
pst = (struct pst_s *) p;
for (j=0; j<psb->numpst; j++) {
for (j = 0; j < psb->numpst; j++) {
pst = (struct pst_s *) p;
number_scales = pst->numpstates;
if ((etuple == pst->cpuid) && check_fsb(pst->fsbspeed) &&
(maxfid==pst->maxfid) && (startvid==pst->startvid))
{
dprintk ("PST:%d (@%p)\n", j, pst);
dprintk (" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n",
pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid);
ret = get_ranges ((char *) pst + sizeof (struct pst_s));
if ((etuple == pst->cpuid) &&
check_fsb(pst->fsbspeed) &&
(maxfid == pst->maxfid) &&
(startvid == pst->startvid)) {
print_pst_entry(pst, j);
p = (char *)pst + sizeof(struct pst_s);
ret = get_ranges(p);
return ret;
} else {
unsigned int k;
p = (char *) pst + sizeof (struct pst_s);
for (k=0; k<number_scales; k++)
p+=2;
p = (char *)pst + sizeof(struct pst_s);
for (k = 0; k < number_scales; k++)
p += 2;
}
}
printk (KERN_INFO PFX "No PST tables match this cpuid (0x%x)\n", etuple);
printk (KERN_INFO PFX "This is indicative of a broken BIOS.\n");
printk(KERN_INFO PFX "No PST tables match this cpuid "
"(0x%x)\n", etuple);
printk(KERN_INFO PFX "This is indicative of a broken "
"BIOS.\n");
return -EINVAL;
}
@@ -511,13 +538,14 @@ static int powernow_decode_bios (int maxfid, int startvid)
}
static int powernow_target (struct cpufreq_policy *policy,
static int powernow_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int newstate;
if (cpufreq_frequency_table_target(policy, powernow_table, target_freq, relation, &newstate))
if (cpufreq_frequency_table_target(policy, powernow_table, target_freq,
relation, &newstate))
return -EINVAL;
change_speed(newstate);
@@ -526,7 +554,7 @@ static int powernow_target (struct cpufreq_policy *policy,
}
static int powernow_verify (struct cpufreq_policy *policy)
static int powernow_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, powernow_table);
}
@@ -566,18 +594,23 @@ static unsigned int powernow_get(unsigned int cpu)
if (cpu)
return 0;
rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val);
rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
return (fsb * fid_codes[cfid] / 10);
return fsb * fid_codes[cfid] / 10;
}
static int __init acer_cpufreq_pst(const struct dmi_system_id *d)
{
printk(KERN_WARNING "%s laptop with broken PST tables in BIOS detected.\n", d->ident);
printk(KERN_WARNING "You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n");
printk(KERN_WARNING "cpufreq scaling has been disabled as a result of this.\n");
printk(KERN_WARNING PFX
"%s laptop with broken PST tables in BIOS detected.\n",
d->ident);
printk(KERN_WARNING PFX
"You need to downgrade to 3A21 (09/09/2002), or try a newer "
"BIOS than 3A71 (01/20/2003)\n");
printk(KERN_WARNING PFX
"cpufreq scaling has been disabled as a result of this.\n");
return 0;
}
@@ -598,7 +631,7 @@ static struct dmi_system_id __initdata powernow_dmi_table[] = {
{ }
};
static int __init powernow_cpu_init (struct cpufreq_policy *policy)
static int __init powernow_cpu_init(struct cpufreq_policy *policy)
{
union msr_fidvidstatus fidvidstatus;
int result;
@@ -606,7 +639,7 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy)
if (policy->cpu != 0)
return -ENODEV;
rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val);
rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
recalibrate_cpu_khz();
@@ -618,19 +651,21 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy)
dprintk("FSB: %3dMHz\n", fsb/1000);
if (dmi_check_system(powernow_dmi_table) || acpi_force) {
printk (KERN_INFO PFX "PSB/PST known to be broken. Trying ACPI instead\n");
printk(KERN_INFO PFX "PSB/PST known to be broken. "
"Trying ACPI instead\n");
result = powernow_acpi_init();
} else {
result = powernow_decode_bios(fidvidstatus.bits.MFID, fidvidstatus.bits.SVID);
result = powernow_decode_bios(fidvidstatus.bits.MFID,
fidvidstatus.bits.SVID);
if (result) {
printk (KERN_INFO PFX "Trying ACPI perflib\n");
printk(KERN_INFO PFX "Trying ACPI perflib\n");
maximum_speed = 0;
minimum_speed = -1;
latency = 0;
result = powernow_acpi_init();
if (result) {
printk (KERN_INFO PFX "ACPI and legacy methods failed\n");
printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.html\n");
printk(KERN_INFO PFX
"ACPI and legacy methods failed\n");
}
} else {
/* SGTC use the bus clock as timer */
@@ -642,10 +677,11 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy)
if (result)
return result;
printk (KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n",
printk(KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n",
minimum_speed/1000, maximum_speed/1000);
policy->cpuinfo.transition_latency = cpufreq_scale(2000000UL, fsb, latency);
policy->cpuinfo.transition_latency =
cpufreq_scale(2000000UL, fsb, latency);
policy->cur = powernow_get(0);
@@ -654,7 +690,8 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy)
return cpufreq_frequency_table_cpuinfo(policy, powernow_table);
}
static int powernow_cpu_exit (struct cpufreq_policy *policy) {
static int powernow_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
@@ -669,7 +706,7 @@ static int powernow_cpu_exit (struct cpufreq_policy *policy) {
return 0;
}
static struct freq_attr* powernow_table_attr[] = {
static struct freq_attr *powernow_table_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
@@ -685,15 +722,15 @@ static struct cpufreq_driver powernow_driver = {
.attr = powernow_table_attr,
};
static int __init powernow_init (void)
static int __init powernow_init(void)
{
if (check_powernow()==0)
if (check_powernow() == 0)
return -ENODEV;
return cpufreq_register_driver(&powernow_driver);
}
static void __exit powernow_exit (void)
static void __exit powernow_exit(void)
{
cpufreq_unregister_driver(&powernow_driver);
}
@@ -701,9 +738,9 @@ static void __exit powernow_exit (void)
module_param(acpi_force, int, 0444);
MODULE_PARM_DESC(acpi_force, "Force ACPI to be used.");
MODULE_AUTHOR ("Dave Jones <davej@redhat.com>");
MODULE_DESCRIPTION ("Powernow driver for AMD K7 processors.");
MODULE_LICENSE ("GPL");
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_DESCRIPTION("Powernow driver for AMD K7 processors.");
MODULE_LICENSE("GPL");
late_initcall(powernow_init);
module_exit(powernow_exit);


@@ -33,16 +33,14 @@
#include <linux/string.h>
#include <linux/cpumask.h>
#include <linux/sched.h> /* for current / set_cpus_allowed() */
#include <linux/io.h>
#include <linux/delay.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/delay.h>
#ifdef CONFIG_X86_POWERNOW_K8_ACPI
#include <linux/acpi.h>
#include <linux/mutex.h>
#include <acpi/processor.h>
#endif
#define PFX "powernow-k8: "
#define VERSION "version 2.20.00"
@@ -56,7 +54,10 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
static int cpu_family = CPU_OPTERON;
#ifndef CONFIG_SMP
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
static inline const struct cpumask *cpu_core_mask(int cpu)
{
return cpumask_of(0);
}
#endif
/* Return a frequency in MHz, given an input fid */
@@ -71,7 +72,8 @@ static u32 find_khz_freq_from_fid(u32 fid)
return 1000 * find_freq_from_fid(fid);
}
static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, u32 pstate)
static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
u32 pstate)
{
return data[pstate].frequency;
}
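The fid code in these helpers maps linearly to a core clock. A minimal standalone sketch, assuming the k8 encoding of 800 MHz plus 100 MHz per fid step (the base and step come from the driver's find_freq_from_fid(), which sits outside this hunk):

#include <stdio.h>

/* fid -> MHz, assuming the k8 encoding: 800 MHz base,
 * 100 MHz per fid step (per find_freq_from_fid()). */
static unsigned int freq_from_fid(unsigned int fid)
{
	return 800 + (fid * 100);
}

/* fid -> kHz, mirroring find_khz_freq_from_fid() above. */
static unsigned int khz_freq_from_fid(unsigned int fid)
{
	return 1000 * freq_from_fid(fid);
}

int main(void)
{
	unsigned int fid;

	for (fid = 0; fid <= 4; fid++)
		printf("fid 0x%x -> %u kHz\n", fid,
		       khz_freq_from_fid(fid));
	return 0;
}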
@@ -186,7 +188,9 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
return 1;
}
lo = fid | (data->currvid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID;
lo = fid;
lo |= (data->currvid << MSR_C_LO_VID_SHIFT);
lo |= MSR_C_LO_INIT_FID_VID;
dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
fid, lo, data->plllock * PLL_LOCK_CONVERSION);
@@ -194,7 +198,9 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
do {
wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
if (i++ > 100) {
printk(KERN_ERR PFX "Hardware error - pending bit very stuck - no further pstate changes possible\n");
printk(KERN_ERR PFX
"Hardware error - pending bit very stuck - "
"no further pstate changes possible\n");
return 1;
}
} while (query_current_values_with_pending_wait(data));
@@ -202,14 +208,16 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
count_off_irt(data);
if (savevid != data->currvid) {
printk(KERN_ERR PFX "vid change on fid trans, old 0x%x, new 0x%x\n",
savevid, data->currvid);
printk(KERN_ERR PFX
"vid change on fid trans, old 0x%x, new 0x%x\n",
savevid, data->currvid);
return 1;
}
if (fid != data->currfid) {
printk(KERN_ERR PFX "fid trans failed, fid 0x%x, curr 0x%x\n", fid,
data->currfid);
printk(KERN_ERR PFX
"fid trans failed, fid 0x%x, curr 0x%x\n", fid,
data->currfid);
return 1;
}
@@ -228,7 +236,9 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid)
return 1;
}
lo = data->currfid | (vid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID;
lo = data->currfid;
lo |= (vid << MSR_C_LO_VID_SHIFT);
lo |= MSR_C_LO_INIT_FID_VID;
dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
vid, lo, STOP_GRANT_5NS);
@@ -236,20 +246,24 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid)
do {
wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
if (i++ > 100) {
printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n");
printk(KERN_ERR PFX "internal error - pending bit "
"very stuck - no further pstate "
"changes possible\n");
return 1;
}
} while (query_current_values_with_pending_wait(data));
if (savefid != data->currfid) {
printk(KERN_ERR PFX "fid changed on vid trans, old 0x%x new 0x%x\n",
printk(KERN_ERR PFX "fid changed on vid trans, old "
"0x%x new 0x%x\n",
savefid, data->currfid);
return 1;
}
if (vid != data->currvid) {
printk(KERN_ERR PFX "vid trans failed, vid 0x%x, curr 0x%x\n", vid,
data->currvid);
printk(KERN_ERR PFX "vid trans failed, vid 0x%x, "
"curr 0x%x\n",
vid, data->currvid);
return 1;
}
@@ -261,7 +275,8 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid)
* Decreasing vid codes represent increasing voltages:
* vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off.
*/
static int decrease_vid_code_by_step(struct powernow_k8_data *data, u32 reqvid, u32 step)
static int decrease_vid_code_by_step(struct powernow_k8_data *data,
u32 reqvid, u32 step)
{
if ((data->currvid - reqvid) > step)
reqvid = data->currvid - step;
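The vid encoding described in the comment above is linear; both quoted endpoints follow from a 25 mV step, which is assumed here. A self-contained sketch (vid_to_mv() is a hypothetical helper, not part of the driver):

#include <stdio.h>

/* Hypothetical decode of a vid code to millivolts: 1550 mV at
 * vid 0, 25 mV less per increment (assumed), so vid 0x1e (30)
 * gives 800 mV, matching the comment above. */
static unsigned int vid_to_mv(unsigned int vid)
{
	return 1550 - vid * 25;
}

int main(void)
{
	printf("vid 0x00 -> %u mV\n", vid_to_mv(0x00)); /* 1550 */
	printf("vid 0x1e -> %u mV\n", vid_to_mv(0x1e)); /* 800 */
	return 0;
}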
@@ -283,7 +298,8 @@ static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
}
/* Change Opteron/Athlon64 fid and vid, by the 3 phases. */
static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 reqvid)
static int transition_fid_vid(struct powernow_k8_data *data,
u32 reqfid, u32 reqvid)
{
if (core_voltage_pre_transition(data, reqvid))
return 1;
@@ -298,7 +314,8 @@ static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 req
return 1;
if ((reqfid != data->currfid) || (reqvid != data->currvid)) {
printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n",
printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, "
"curr 0x%x 0x%x\n",
smp_processor_id(),
reqfid, reqvid, data->currfid, data->currvid);
return 1;
@@ -311,13 +328,15 @@ static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 req
}
/* Phase 1 - core voltage transition ... setup voltage */
static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid)
static int core_voltage_pre_transition(struct powernow_k8_data *data,
u32 reqvid)
{
u32 rvosteps = data->rvo;
u32 savefid = data->currfid;
u32 maxvid, lo;
dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n",
dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, "
"reqvid 0x%x, rvo 0x%x\n",
smp_processor_id(),
data->currfid, data->currvid, reqvid, data->rvo);
@@ -340,7 +359,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid
} else {
dprintk("ph1: changing vid for rvo, req 0x%x\n",
data->currvid - 1);
if (decrease_vid_code_by_step(data, data->currvid - 1, 1))
if (decrease_vid_code_by_step(data, data->currvid-1, 1))
return 1;
rvosteps--;
}
@@ -350,7 +369,8 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid
return 1;
if (savefid != data->currfid) {
printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", data->currfid);
printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n",
data->currfid);
return 1;
}
@@ -363,20 +383,24 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid
/* Phase 2 - core frequency transition */
static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
{
u32 vcoreqfid, vcocurrfid, vcofiddiff, fid_interval, savevid = data->currvid;
u32 vcoreqfid, vcocurrfid, vcofiddiff;
u32 fid_interval, savevid = data->currvid;
if ((reqfid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) {
printk(KERN_ERR PFX "ph2: illegal lo-lo transition 0x%x 0x%x\n",
reqfid, data->currfid);
if ((reqfid < HI_FID_TABLE_BOTTOM) &&
(data->currfid < HI_FID_TABLE_BOTTOM)) {
printk(KERN_ERR PFX "ph2: illegal lo-lo transition "
"0x%x 0x%x\n", reqfid, data->currfid);
return 1;
}
if (data->currfid == reqfid) {
printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", data->currfid);
printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n",
data->currfid);
return 0;
}
dprintk("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n",
dprintk("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, "
"reqfid 0x%x\n",
smp_processor_id(),
data->currfid, data->currvid, reqfid);
@@ -390,14 +414,14 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
if (reqfid > data->currfid) {
if (data->currfid > LO_FID_TABLE_TOP) {
if (write_new_fid(data, data->currfid + fid_interval)) {
if (write_new_fid(data,
data->currfid + fid_interval))
return 1;
}
} else {
if (write_new_fid
(data, 2 + convert_fid_to_vco_fid(data->currfid))) {
(data,
2 + convert_fid_to_vco_fid(data->currfid)))
return 1;
}
}
} else {
if (write_new_fid(data, data->currfid - fid_interval))
@@ -417,7 +441,8 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
if (data->currfid != reqfid) {
printk(KERN_ERR PFX
"ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n",
"ph2: mismatch, failed fid transition, "
"curr 0x%x, req 0x%x\n",
data->currfid, reqfid);
return 1;
}
@@ -435,7 +460,8 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
}
/* Phase 3 - core voltage transition flow ... jump to the final vid. */
static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid)
static int core_voltage_post_transition(struct powernow_k8_data *data,
u32 reqvid)
{
u32 savefid = data->currfid;
u32 savereqvid = reqvid;
@@ -457,7 +483,8 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
if (data->currvid != reqvid) {
printk(KERN_ERR PFX
"ph3: failed vid transition\n, req 0x%x, curr 0x%x",
"ph3: failed vid transition\n, "
"req 0x%x, curr 0x%x",
reqvid, data->currvid);
return 1;
}
@@ -508,7 +535,8 @@ static int check_supported_cpu(unsigned int cpu)
if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax);
printk(KERN_INFO PFX
"Processor cpuid %x not supported\n", eax);
goto out;
}
@@ -520,8 +548,10 @@ static int check_supported_cpu(unsigned int cpu)
}
cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
if ((edx & P_STATE_TRANSITION_CAPABLE) != P_STATE_TRANSITION_CAPABLE) {
printk(KERN_INFO PFX "Power state transitions not supported\n");
if ((edx & P_STATE_TRANSITION_CAPABLE)
!= P_STATE_TRANSITION_CAPABLE) {
printk(KERN_INFO PFX
"Power state transitions not supported\n");
goto out;
}
} else { /* must be a HW Pstate capable processor */
@@ -539,7 +569,8 @@ out:
return rc;
}
static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid)
static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
u8 maxvid)
{
unsigned int j;
u8 lastfid = 0xff;
@@ -550,12 +581,14 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8
j, pst[j].vid);
return -EINVAL;
}
if (pst[j].vid < data->rvo) { /* vid + rvo >= 0 */
if (pst[j].vid < data->rvo) {
/* vid + rvo >= 0 */
printk(KERN_ERR FW_BUG PFX "0 vid exceeded with pstate"
" %d\n", j);
return -ENODEV;
}
if (pst[j].vid < maxvid + data->rvo) { /* vid + rvo >= maxvid */
if (pst[j].vid < maxvid + data->rvo) {
/* vid + rvo >= maxvid */
printk(KERN_ERR FW_BUG PFX "maxvid exceeded with pstate"
" %d\n", j);
return -ENODEV;
@@ -579,23 +612,31 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8
return -EINVAL;
}
if (lastfid > LO_FID_TABLE_TOP)
printk(KERN_INFO FW_BUG PFX "first fid not from lo freq table\n");
printk(KERN_INFO FW_BUG PFX
"first fid not from lo freq table\n");
return 0;
}
static void invalidate_entry(struct powernow_k8_data *data, unsigned int entry)
{
data->powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
}
static void print_basics(struct powernow_k8_data *data)
{
int j;
for (j = 0; j < data->numps; j++) {
if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID) {
if (data->powernow_table[j].frequency !=
CPUFREQ_ENTRY_INVALID) {
if (cpu_family == CPU_HW_PSTATE) {
printk(KERN_INFO PFX " %d : pstate %d (%d MHz)\n",
j,
printk(KERN_INFO PFX
" %d : pstate %d (%d MHz)\n", j,
data->powernow_table[j].index,
data->powernow_table[j].frequency/1000);
} else {
printk(KERN_INFO PFX " %d : fid 0x%x (%d MHz), vid 0x%x\n",
printk(KERN_INFO PFX
" %d : fid 0x%x (%d MHz), vid 0x%x\n",
j,
data->powernow_table[j].index & 0xff,
data->powernow_table[j].frequency/1000,
@@ -604,20 +645,25 @@ static void print_basics(struct powernow_k8_data *data)
}
}
if (data->batps)
printk(KERN_INFO PFX "Only %d pstates on battery\n", data->batps);
printk(KERN_INFO PFX "Only %d pstates on battery\n",
data->batps);
}
static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid)
static int fill_powernow_table(struct powernow_k8_data *data,
struct pst_s *pst, u8 maxvid)
{
struct cpufreq_frequency_table *powernow_table;
unsigned int j;
if (data->batps) { /* use ACPI support to get full speed on mains power */
printk(KERN_WARNING PFX "Only %d pstates usable (use ACPI driver for full range\n", data->batps);
if (data->batps) {
/* use ACPI support to get full speed on mains power */
printk(KERN_WARNING PFX
"Only %d pstates usable (use ACPI driver for full "
"range\n", data->batps);
data->numps = data->batps;
}
for ( j=1; j<data->numps; j++ ) {
for (j = 1; j < data->numps; j++) {
if (pst[j-1].fid >= pst[j].fid) {
printk(KERN_ERR PFX "PST out of sequence\n");
return -EINVAL;
@@ -640,9 +686,11 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst,
}
for (j = 0; j < data->numps; j++) {
int freq;
powernow_table[j].index = pst[j].fid; /* lower 8 bits */
powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */
powernow_table[j].frequency = find_khz_freq_from_fid(pst[j].fid);
freq = find_khz_freq_from_fid(pst[j].fid);
powernow_table[j].frequency = freq;
}
powernow_table[data->numps].frequency = CPUFREQ_TABLE_END;
powernow_table[data->numps].index = 0;
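The index packing in the loop above, fid in the low byte and vid in the next byte, round-trips with plain shifts and masks; a small self-contained sketch with example values:

#include <stdio.h>

/* Pack fid into bits 7:0 and vid into bits 15:8, as the
 * powernow_table[].index assignments above do. */
static unsigned int pack_fidvid(unsigned char fid, unsigned char vid)
{
	return fid | (vid << 8);
}

int main(void)
{
	unsigned int index = pack_fidvid(0x0c, 0x12);

	printf("index 0x%04x -> fid 0x%x, vid 0x%x\n",
	       index, index & 0xff, (index >> 8) & 0xff);
	return 0;
}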
@@ -654,11 +702,12 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst,
dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
data->powernow_table = powernow_table;
if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
print_basics(data);
for (j = 0; j < data->numps; j++)
if ((pst[j].fid==data->currfid) && (pst[j].vid==data->currvid))
if ((pst[j].fid == data->currfid) &&
(pst[j].vid == data->currvid))
return 0;
dprintk("currfid/vid do not match PST, ignoring\n");
@@ -698,7 +747,8 @@ static int find_psb_table(struct powernow_k8_data *data)
}
data->vstable = psb->vstable;
dprintk("voltage stabilization time: %d(*20us)\n", data->vstable);
dprintk("voltage stabilization time: %d(*20us)\n",
data->vstable);
dprintk("flags2: 0x%x\n", psb->flags2);
data->rvo = psb->flags2 & 3;
@@ -713,11 +763,12 @@ static int find_psb_table(struct powernow_k8_data *data)
dprintk("numpst: 0x%x\n", psb->num_tables);
cpst = psb->num_tables;
if ((psb->cpuid == 0x00000fc0) || (psb->cpuid == 0x00000fe0) ){
if ((psb->cpuid == 0x00000fc0) ||
(psb->cpuid == 0x00000fe0)) {
thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
if ((thiscpuid == 0x00000fc0) || (thiscpuid == 0x00000fe0) ) {
if ((thiscpuid == 0x00000fc0) ||
(thiscpuid == 0x00000fe0))
cpst = 1;
}
}
if (cpst != 1) {
printk(KERN_ERR FW_BUG PFX "numpst must be 1\n");
@@ -732,7 +783,8 @@ static int find_psb_table(struct powernow_k8_data *data)
data->numps = psb->numps;
dprintk("numpstates: 0x%x\n", data->numps);
return fill_powernow_table(data, (struct pst_s *)(psb+1), maxvid);
return fill_powernow_table(data,
(struct pst_s *)(psb+1), maxvid);
}
/*
* If you see this message, complain to BIOS manufacturer. If
@@ -745,28 +797,31 @@ static int find_psb_table(struct powernow_k8_data *data)
* BIOS and Kernel Developer's Guide, which is available on
* www.amd.com
*/
printk(KERN_ERR PFX "BIOS error - no PSB or ACPI _PSS objects\n");
printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n");
return -ENODEV;
}
#ifdef CONFIG_X86_POWERNOW_K8_ACPI
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
unsigned int index)
{
acpi_integer control;
if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
return;
data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK;
data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK);
data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK;
}
control = data->acpi_data.states[index].control;
data->irt = (control >> IRT_SHIFT) & IRT_MASK;
data->rvo = (control >> RVO_SHIFT) & RVO_MASK;
data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK;
data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK);
data->vstable = (control >> VST_SHIFT) & VST_MASK;
}
static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
{
struct cpufreq_frequency_table *powernow_table;
int ret_val = -ENODEV;
acpi_integer space_id;
if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
dprintk("register performance failed: bad ACPI data\n");
@@ -779,11 +834,12 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
goto err_out;
}
if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
(data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
space_id = data->acpi_data.control_register.space_id;
if ((space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
(data->acpi_data.status_register.space_id !=
ACPI_ADR_SPACE_FIXED_HARDWARE)) {
dprintk("Invalid control/status registers (%x - %x)\n",
data->acpi_data.control_register.space_id,
data->acpi_data.status_register.space_id);
space_id, data->acpi_data.status_register.space_id);
goto err_out;
}
@@ -802,13 +858,14 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
if (ret_val)
goto err_out_mem;
powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END;
powernow_table[data->acpi_data.state_count].frequency =
CPUFREQ_TABLE_END;
powernow_table[data->acpi_data.state_count].index = 0;
data->powernow_table = powernow_table;
/* fill in data */
data->numps = data->acpi_data.state_count;
if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
print_basics(data);
powernow_k8_acpi_pst_values(data, 0);
@@ -830,13 +887,15 @@ err_out_mem:
err_out:
acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
/* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
/* data->acpi_data.state_count informs us at ->exit()
* whether ACPI was used */
data->acpi_data.state_count = 0;
return ret_val;
}
static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table)
static int fill_powernow_table_pstate(struct powernow_k8_data *data,
struct cpufreq_frequency_table *powernow_table)
{
int i;
u32 hi = 0, lo = 0;
@@ -848,84 +907,101 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
if (index > data->max_hw_pstate) {
printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index);
printk(KERN_ERR PFX "Please report to BIOS manufacturer\n");
powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
printk(KERN_ERR PFX "invalid pstate %d - "
"bad value %d.\n", i, index);
printk(KERN_ERR PFX "Please report to BIOS "
"manufacturer\n");
invalidate_entry(data, i);
continue;
}
rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
if (!(hi & HW_PSTATE_VALID_MASK)) {
dprintk("invalid pstate %d, ignoring\n", index);
powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
invalidate_entry(data, i);
continue;
}
powernow_table[i].index = index;
powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
powernow_table[i].frequency =
data->acpi_data.states[i].core_frequency * 1000;
}
return 0;
}
static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table)
static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
struct cpufreq_frequency_table *powernow_table)
{
int i;
int cntlofreq = 0;
for (i = 0; i < data->acpi_data.state_count; i++) {
u32 fid;
u32 vid;
u32 freq, index;
acpi_integer status, control;
if (data->exttype) {
fid = data->acpi_data.states[i].status & EXT_FID_MASK;
vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK;
status = data->acpi_data.states[i].status;
fid = status & EXT_FID_MASK;
vid = (status >> VID_SHIFT) & EXT_VID_MASK;
} else {
fid = data->acpi_data.states[i].control & FID_MASK;
vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
control = data->acpi_data.states[i].control;
fid = control & FID_MASK;
vid = (control >> VID_SHIFT) & VID_MASK;
}
dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
powernow_table[i].index = fid; /* lower 8 bits */
powernow_table[i].index |= (vid << 8); /* upper 8 bits */
powernow_table[i].frequency = find_khz_freq_from_fid(fid);
index = fid | (vid<<8);
powernow_table[i].index = index;
freq = find_khz_freq_from_fid(fid);
powernow_table[i].frequency = freq;
/* verify frequency is OK */
if ((powernow_table[i].frequency > (MAX_FREQ * 1000)) ||
(powernow_table[i].frequency < (MIN_FREQ * 1000))) {
dprintk("invalid freq %u kHz, ignoring\n", powernow_table[i].frequency);
powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
dprintk("invalid freq %u kHz, ignoring\n", freq);
invalidate_entry(data, i);
continue;
}
/* verify voltage is OK - BIOSs are using "off" to indicate invalid */
/* verify voltage is OK -
* BIOSs are using "off" to indicate invalid */
if (vid == VID_OFF) {
dprintk("invalid vid %u, ignoring\n", vid);
powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
invalidate_entry(data, i);
continue;
}
/* verify only 1 entry from the lo frequency table */
if (fid < HI_FID_TABLE_BOTTOM) {
if (cntlofreq) {
/* if both entries are the same, ignore this one ... */
if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) ||
(powernow_table[i].index != powernow_table[cntlofreq].index)) {
printk(KERN_ERR PFX "Too many lo freq table entries\n");
/* if both entries are the same,
* ignore this one ... */
if ((freq != powernow_table[cntlofreq].frequency) ||
(index != powernow_table[cntlofreq].index)) {
printk(KERN_ERR PFX
"Too many lo freq table "
"entries\n");
return 1;
}
dprintk("double low frequency table entry, ignoring it.\n");
powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
dprintk("double low frequency table entry, "
"ignoring it.\n");
invalidate_entry(data, i);
continue;
} else
cntlofreq = i;
}
if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
powernow_table[i].frequency,
(unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
printk(KERN_INFO PFX "invalid freq entries "
"%u kHz vs. %u kHz\n", freq,
(unsigned int)
(data->acpi_data.states[i].core_frequency
* 1000));
invalidate_entry(data, i);
continue;
}
}
@@ -935,7 +1011,8 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
{
if (data->acpi_data.state_count)
acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
acpi_processor_unregister_performance(&data->acpi_data,
data->cpu);
free_cpumask_var(data->acpi_data.shared_cpu_map);
}
@@ -953,15 +1030,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
return 1000 * max_latency;
}
#else
static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
static int get_transition_latency(struct powernow_k8_data *data) { return 0; }
#endif /* CONFIG_X86_POWERNOW_K8_ACPI */
/* Take a frequency, and issue the fid/vid transition command */
static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned int index)
static int transition_frequency_fidvid(struct powernow_k8_data *data,
unsigned int index)
{
u32 fid = 0;
u32 vid = 0;
@@ -989,7 +1060,8 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
return 0;
}
if ((fid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) {
if ((fid < HI_FID_TABLE_BOTTOM) &&
(data->currfid < HI_FID_TABLE_BOTTOM)) {
printk(KERN_ERR PFX
"ignoring illegal change in lo freq table-%x to 0x%x\n",
data->currfid, fid);
@@ -1017,7 +1089,8 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
}
/* Take a frequency, and issue the hardware pstate transition command */
static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned int index)
static int transition_frequency_pstate(struct powernow_k8_data *data,
unsigned int index)
{
u32 pstate = 0;
int res, i;
@@ -1029,7 +1102,8 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
pstate = index & HW_PSTATE_MASK;
if (pstate > data->max_hw_pstate)
return 0;
freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
freqs.old = find_khz_freq_from_pstate(data->powernow_table,
data->currpstate);
freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
for_each_cpu_mask_nr(i, *(data->available_cores)) {
@@ -1048,7 +1122,8 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
}
/* Driver entry point to switch to the target frequency */
static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
static int powernowk8_target(struct cpufreq_policy *pol,
unsigned targfreq, unsigned relation)
{
cpumask_t oldmask;
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
@@ -1087,14 +1162,18 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
dprintk("targ: curr fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
if ((checkvid != data->currvid) || (checkfid != data->currfid)) {
if ((checkvid != data->currvid) ||
(checkfid != data->currfid)) {
printk(KERN_INFO PFX
"error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
checkfid, data->currfid, checkvid, data->currvid);
"error - out of sync, fix 0x%x 0x%x, "
"vid 0x%x 0x%x\n",
checkfid, data->currfid,
checkvid, data->currvid);
}
}
if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate))
if (cpufreq_frequency_table_target(pol, data->powernow_table,
targfreq, relation, &newstate))
goto err_out;
mutex_lock(&fidvid_mutex);
@@ -1114,7 +1193,8 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
mutex_unlock(&fidvid_mutex);
if (cpu_family == CPU_HW_PSTATE)
pol->cur = find_khz_freq_from_pstate(data->powernow_table, newstate);
pol->cur = find_khz_freq_from_pstate(data->powernow_table,
newstate);
else
pol->cur = find_khz_freq_from_fid(data->currfid);
ret = 0;
@@ -1141,6 +1221,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
struct powernow_k8_data *data;
cpumask_t oldmask;
int rc;
static int print_once;
if (!cpu_online(pol->cpu))
return -ENODEV;
@@ -1163,33 +1244,31 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
* an UP version, and is deprecated by AMD.
*/
if (num_online_cpus() != 1) {
#ifndef CONFIG_ACPI_PROCESSOR
printk(KERN_ERR PFX "ACPI Processor support is required "
"for SMP systems but is absent. Please load the "
"ACPI Processor module before starting this "
"driver.\n");
#else
printk(KERN_ERR FW_BUG PFX "Your BIOS does not provide"
" ACPI _PSS objects in a way that Linux "
"understands. Please report this to the Linux "
"ACPI maintainers and complain to your BIOS "
"vendor.\n");
#endif
kfree(data);
return -ENODEV;
/*
* Replace this one with print_once as soon as such a
* thing gets introduced
*/
if (!print_once) {
WARN_ONCE(1, KERN_ERR FW_BUG PFX "Your BIOS "
"does not provide ACPI _PSS objects "
"in a way that Linux understands. "
"Please report this to the Linux ACPI"
" maintainers and complain to your "
"BIOS vendor.\n");
print_once++;
}
goto err_out;
}
if (pol->cpu != 0) {
printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
"CPU other than CPU0. Complain to your BIOS "
"vendor.\n");
kfree(data);
return -ENODEV;
goto err_out;
}
rc = find_psb_table(data);
if (rc) {
kfree(data);
return -ENODEV;
}
if (rc)
goto err_out;
/* Take a crude guess here.
* That guess was in microseconds, so multiply with 1000 */
pol->cpuinfo.transition_latency = (
@@ -1204,16 +1283,16 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
if (smp_processor_id() != pol->cpu) {
printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
goto err_out;
goto err_out_unmask;
}
if (pending_bit_stuck()) {
printk(KERN_ERR PFX "failing init, change pending bit set\n");
goto err_out;
goto err_out_unmask;
}
if (query_current_values_with_pending_wait(data))
goto err_out;
goto err_out_unmask;
if (cpu_family == CPU_OPTERON)
fidvid_msr_init();
@@ -1224,11 +1303,12 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
if (cpu_family == CPU_HW_PSTATE)
cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
else
cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
data->available_cores = pol->cpus;
if (cpu_family == CPU_HW_PSTATE)
pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
pol->cur = find_khz_freq_from_pstate(data->powernow_table,
data->currpstate);
else
pol->cur = find_khz_freq_from_fid(data->currfid);
dprintk("policy current frequency %d kHz\n", pol->cur);
@@ -1245,7 +1325,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
if (cpu_family == CPU_HW_PSTATE)
dprintk("cpu_init done, current pstate 0x%x\n", data->currpstate);
dprintk("cpu_init done, current pstate 0x%x\n",
data->currpstate);
else
dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
@@ -1254,15 +1335,16 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
return 0;
err_out:
err_out_unmask:
set_cpus_allowed_ptr(current, &oldmask);
powernow_k8_cpu_exit_acpi(data);
err_out:
kfree(data);
return -ENODEV;
}
static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol)
static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
@@ -1279,14 +1361,14 @@ static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol)
return 0;
}
static unsigned int powernowk8_get (unsigned int cpu)
static unsigned int powernowk8_get(unsigned int cpu)
{
struct powernow_k8_data *data;
cpumask_t oldmask = current->cpus_allowed;
unsigned int khz = 0;
unsigned int first;
first = first_cpu(per_cpu(cpu_core_map, cpu));
first = cpumask_first(cpu_core_mask(cpu));
data = per_cpu(powernow_data, first);
if (!data)
@@ -1315,7 +1397,7 @@ out:
return khz;
}
static struct freq_attr* powernow_k8_attr[] = {
static struct freq_attr *powernow_k8_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
@@ -1360,7 +1442,8 @@ static void __exit powernowk8_exit(void)
cpufreq_unregister_driver(&cpufreq_amd64_driver);
}
MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>");
MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and "
"Mark Langsdorf <mark.langsdorf@amd.com>");
MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
MODULE_LICENSE("GPL");


@@ -45,11 +45,10 @@ struct powernow_k8_data {
* frequency is in kHz */
struct cpufreq_frequency_table *powernow_table;
#ifdef CONFIG_X86_POWERNOW_K8_ACPI
/* the acpi table needs to be kept. it's only available if ACPI was
* used to determine valid frequency/vid/fid states */
struct acpi_processor_performance acpi_data;
#endif
/* we need to keep track of associated cores, but let cpufreq
* handle hotplug events - so just point at cpufreq pol->cpus
* structure */
@@ -222,10 +221,8 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
#ifdef CONFIG_X86_POWERNOW_K8_ACPI
static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
#endif
#ifdef CONFIG_SMP
static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])


@@ -19,17 +19,19 @@
#include <linux/delay.h>
#include <linux/cpufreq.h>
#include <linux/timex.h>
#include <linux/io.h>
#include <asm/msr.h>
#include <asm/timex.h>
#include <asm/io.h>
#define MMCR_BASE 0xfffef000 /* The default base address */
#define OFFS_CPUCTL 0x2 /* CPU Control Register */
static __u8 __iomem *cpuctl;
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "sc520_freq", msg)
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
"sc520_freq", msg)
#define PFX "sc520_freq: "
static struct cpufreq_frequency_table sc520_freq_table[] = {
{0x01, 100000},
@@ -43,7 +45,8 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
switch (clockspeed_reg & 0x03) {
default:
printk(KERN_ERR "sc520_freq: error: cpuctl register has unexpected value %02x\n", clockspeed_reg);
printk(KERN_ERR PFX "error: cpuctl register has unexpected "
"value %02x\n", clockspeed_reg);
case 0x01:
return 100000;
case 0x02:
@@ -51,7 +54,7 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
}
}
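The switch above decodes the low two bits of the memory-mapped CPUCTL register into a frequency in kHz. A standalone sketch of the same decode, including the driver's fall-through from the error case (the 133 MHz value for 0x02 is assumed from the driver's frequency table):

#include <stdio.h>

/* Decode the SC520 clock-select bits, mirroring
 * sc520_freq_get_cpu_frequency(): 0x01 -> 100 MHz,
 * 0x02 -> 133 MHz, unexpected values fall through to 100 MHz. */
static unsigned int sc520_decode_cpuctl(unsigned char reg)
{
	switch (reg & 0x03) {
	default:
		fprintf(stderr, "unexpected cpuctl value %02x\n", reg);
		/* fall through, as the driver does */
	case 0x01:
		return 100000;
	case 0x02:
		return 133000;
	}
}

int main(void)
{
	printf("0x01 -> %u kHz\n", sc520_decode_cpuctl(0x01));
	printf("0x02 -> %u kHz\n", sc520_decode_cpuctl(0x02));
	return 0;
}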
static void sc520_freq_set_cpu_state (unsigned int state)
static void sc520_freq_set_cpu_state(unsigned int state)
{
struct cpufreq_freqs freqs;
@@ -76,18 +79,19 @@ static void sc520_freq_set_cpu_state (unsigned int state)
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
};
static int sc520_freq_verify (struct cpufreq_policy *policy)
static int sc520_freq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]);
}
static int sc520_freq_target (struct cpufreq_policy *policy,
static int sc520_freq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int newstate = 0;
if (cpufreq_frequency_table_target(policy, sc520_freq_table, target_freq, relation, &newstate))
if (cpufreq_frequency_table_target(policy, sc520_freq_table,
target_freq, relation, &newstate))
return -EINVAL;
sc520_freq_set_cpu_state(newstate);
@@ -116,7 +120,7 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table);
if (result)
return (result);
return result;
cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);
@@ -131,7 +135,7 @@ static int sc520_freq_cpu_exit(struct cpufreq_policy *policy)
}
static struct freq_attr* sc520_freq_attr[] = {
static struct freq_attr *sc520_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
@@ -155,13 +159,13 @@ static int __init sc520_freq_init(void)
int err;
/* Test if we have the right hardware */
if(c->x86_vendor != X86_VENDOR_AMD ||
c->x86 != 4 || c->x86_model != 9) {
if (c->x86_vendor != X86_VENDOR_AMD ||
c->x86 != 4 || c->x86_model != 9) {
dprintk("no Elan SC520 processor found!\n");
return -ENODEV;
}
cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
if(!cpuctl) {
if (!cpuctl) {
printk(KERN_ERR "sc520_freq: error: failed to remap memory\n");
return -ENOMEM;
}


@@ -39,7 +39,7 @@ static struct pci_dev *speedstep_chipset_dev;
/* speedstep_processor
*/
static unsigned int speedstep_processor = 0;
static unsigned int speedstep_processor;
static u32 pmbase;
@@ -54,7 +54,8 @@ static struct cpufreq_frequency_table speedstep_freqs[] = {
};
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-ich", msg)
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
"speedstep-ich", msg)
/**
@@ -62,7 +63,7 @@ static struct cpufreq_frequency_table speedstep_freqs[] = {
*
* Returns: -ENODEV if no register could be found
*/
static int speedstep_find_register (void)
static int speedstep_find_register(void)
{
if (!speedstep_chipset_dev)
return -ENODEV;
@@ -90,7 +91,7 @@ static int speedstep_find_register (void)
*
* Tries to change the SpeedStep state.
*/
static void speedstep_set_state (unsigned int state)
static void speedstep_set_state(unsigned int state)
{
u8 pm2_blk;
u8 value;
@@ -133,11 +134,11 @@ static void speedstep_set_state (unsigned int state)
dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
if (state == (value & 0x1)) {
dprintk("change to %u MHz succeeded\n", (speedstep_get_processor_frequency(speedstep_processor) / 1000));
} else {
printk (KERN_ERR "cpufreq: change failed - I/O error\n");
}
if (state == (value & 0x1))
dprintk("change to %u MHz succeeded\n",
speedstep_get_frequency(speedstep_processor) / 1000);
else
printk(KERN_ERR "cpufreq: change failed - I/O error\n");
return;
}
@@ -149,7 +150,7 @@ static void speedstep_set_state (unsigned int state)
* Tries to activate the SpeedStep status and control registers.
* Returns -EINVAL on an unsupported chipset, and zero on success.
*/
static int speedstep_activate (void)
static int speedstep_activate(void)
{
u16 value = 0;
@@ -175,20 +176,18 @@ static int speedstep_activate (void)
* functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected
* chipset, or zero on failure.
*/
static unsigned int speedstep_detect_chipset (void)
static unsigned int speedstep_detect_chipset(void)
{
speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82801DB_12,
PCI_ANY_ID,
PCI_ANY_ID,
PCI_ANY_ID, PCI_ANY_ID,
NULL);
if (speedstep_chipset_dev)
return 4; /* 4-M */
speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82801CA_12,
PCI_ANY_ID,
PCI_ANY_ID,
PCI_ANY_ID, PCI_ANY_ID,
NULL);
if (speedstep_chipset_dev)
return 3; /* 3-M */
@@ -196,8 +195,7 @@ static unsigned int speedstep_detect_chipset (void)
speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82801BA_10,
PCI_ANY_ID,
PCI_ANY_ID,
PCI_ANY_ID, PCI_ANY_ID,
NULL);
if (speedstep_chipset_dev) {
/* speedstep.c causes lockups on Dell Inspirons 8000 and
@@ -208,8 +206,7 @@ static unsigned int speedstep_detect_chipset (void)
hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82815_MC,
PCI_ANY_ID,
PCI_ANY_ID,
PCI_ANY_ID, PCI_ANY_ID,
NULL);
if (!hostbridge)
@@ -236,7 +233,7 @@ static unsigned int _speedstep_get(const struct cpumask *cpus)
cpus_allowed = current->cpus_allowed;
set_cpus_allowed_ptr(current, cpus);
speed = speedstep_get_processor_frequency(speedstep_processor);
speed = speedstep_get_frequency(speedstep_processor);
set_cpus_allowed_ptr(current, &cpus_allowed);
dprintk("detected %u kHz as current frequency\n", speed);
return speed;
@@ -251,11 +248,12 @@ static unsigned int speedstep_get(unsigned int cpu)
* speedstep_target - set a new CPUFreq policy
* @policy: new policy
* @target_freq: the target frequency
* @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
* @relation: how that frequency relates to achieved frequency
* (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
*
* Sets a new CPUFreq policy.
*/
static int speedstep_target (struct cpufreq_policy *policy,
static int speedstep_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
@@ -264,7 +262,8 @@ static int speedstep_target (struct cpufreq_policy *policy,
cpumask_t cpus_allowed;
int i;
if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
target_freq, relation, &newstate))
return -EINVAL;
freqs.old = _speedstep_get(policy->cpus);
@@ -308,7 +307,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
* Limit must be within speedstep_low_freq and speedstep_high_freq, with
* at least one border included.
*/
static int speedstep_verify (struct cpufreq_policy *policy)
static int speedstep_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
}
@@ -322,7 +321,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
/* only run on CPU to be set, or on its sibling */
#ifdef CONFIG_SMP
cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif
cpus_allowed = current->cpus_allowed;
@@ -344,7 +343,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
return -EIO;
dprintk("currently at %s speed setting - %i MHz\n",
(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high",
(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
? "low" : "high",
(speed / 1000));
/* cpuinfo and default policy values */
@@ -352,9 +352,9 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
if (result)
return (result);
return result;
cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
return 0;
}
@@ -366,7 +366,7 @@ static int speedstep_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
static struct freq_attr* speedstep_attr[] = {
static struct freq_attr *speedstep_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
@@ -396,13 +396,15 @@ static int __init speedstep_init(void)
/* detect processor */
speedstep_processor = speedstep_detect_processor();
if (!speedstep_processor) {
dprintk("Intel(R) SpeedStep(TM) capable processor not found\n");
dprintk("Intel(R) SpeedStep(TM) capable processor "
"not found\n");
return -ENODEV;
}
/* detect chipset */
if (!speedstep_detect_chipset()) {
dprintk("Intel(R) SpeedStep(TM) for this chipset not (yet) available.\n");
dprintk("Intel(R) SpeedStep(TM) for this chipset not "
"(yet) available.\n");
return -ENODEV;
}
@@ -431,9 +433,11 @@ static void __exit speedstep_exit(void)
}
MODULE_AUTHOR ("Dave Jones <davej@redhat.com>, Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION ("Speedstep driver for Intel mobile processors on chipsets with ICH-M southbridges.");
MODULE_LICENSE ("GPL");
MODULE_AUTHOR("Dave Jones <davej@redhat.com>, "
"Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("Speedstep driver for Intel mobile processors on chipsets "
"with ICH-M southbridges.");
MODULE_LICENSE("GPL");
module_init(speedstep_init);
module_exit(speedstep_exit);


@@ -16,12 +16,16 @@
#include <linux/slab.h>
#include <asm/msr.h>
#include <asm/tsc.h>
#include "speedstep-lib.h"
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-lib", msg)
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
"speedstep-lib", msg)
#define PFX "speedstep-lib: "
#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK
static int relaxed_check = 0;
static int relaxed_check;
#else
#define relaxed_check 0
#endif
@@ -30,14 +34,14 @@ static int relaxed_check = 0;
* GET PROCESSOR CORE SPEED IN KHZ *
*********************************************************************/
static unsigned int pentium3_get_frequency (unsigned int processor)
static unsigned int pentium3_get_frequency(unsigned int processor)
{
/* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
/* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
struct {
unsigned int ratio; /* Frequency Multiplier (x10) */
u8 bitmap; /* power on configuration bits
[27, 25:22] (in MSR 0x2a) */
} msr_decode_mult [] = {
} msr_decode_mult[] = {
{ 30, 0x01 },
{ 35, 0x05 },
{ 40, 0x02 },
@@ -52,7 +56,7 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
{ 85, 0x26 },
{ 90, 0x20 },
{ 100, 0x2b },
{ 0, 0xff } /* error or unknown value */
{ 0, 0xff } /* error or unknown value */
};
/* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */
@@ -60,7 +64,7 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
unsigned int value; /* Front Side Bus speed in MHz */
u8 bitmap; /* power on configuration bits [18: 19]
(in MSR 0x2a) */
} msr_decode_fsb [] = {
} msr_decode_fsb[] = {
{ 66, 0x0 },
{ 100, 0x2 },
{ 133, 0x1 },
@@ -85,7 +89,7 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
}
/* decode the multiplier */
if (processor == SPEEDSTEP_PROCESSOR_PIII_C_EARLY) {
if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) {
dprintk("workaround for early PIIIs\n");
msr_lo &= 0x03c00000;
} else
@@ -97,9 +101,10 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
j++;
}
dprintk("speed is %u\n", (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100));
dprintk("speed is %u\n",
(msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100));
return (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100);
return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100;
}
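The PIII decode above is two table scans keyed on power-on configuration bits, after which ratio * FSB * 100 yields kHz. A trimmed, self-contained sketch (table entries abbreviated from the ones above):

#include <stdio.h>

struct decode {
	unsigned int value;	/* multiplier x10, or FSB in MHz */
	unsigned char bitmap;	/* power-on configuration bits */
};

/* Abbreviated msr_decode_mult[] / msr_decode_fsb[]; a bitmap of
 * 0xff terminates the scan as "error or unknown value". */
static const struct decode mult[] = {
	{ 30, 0x01 }, { 35, 0x05 }, { 100, 0x2b }, { 0, 0xff },
};
static const struct decode fsb[] = {
	{ 66, 0x0 }, { 100, 0x2 }, { 133, 0x1 }, { 0, 0xff },
};

static unsigned int scan(const struct decode *t, unsigned char bits)
{
	unsigned int i = 0;

	while (t[i].bitmap != bits && t[i].bitmap != 0xff)
		i++;
	return t[i].value;	/* 0 if the bits did not match */
}

int main(void)
{
	/* mult bits 0x2b (x10.0) with FSB bits 0x2 (100 MHz):
	 * 100 * 100 * 100 = 1000000 kHz, i.e. a 1 GHz part. */
	printf("%u kHz\n", scan(mult, 0x2b) * scan(fsb, 0x2) * 100);
	return 0;
}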
@@ -112,20 +117,23 @@ static unsigned int pentiumM_get_frequency(void)
/* see table B-2 of 24547212.pdf */
if (msr_lo & 0x00040000) {
printk(KERN_DEBUG "speedstep-lib: PM - invalid FSB: 0x%x 0x%x\n", msr_lo, msr_tmp);
printk(KERN_DEBUG PFX "PM - invalid FSB: 0x%x 0x%x\n",
msr_lo, msr_tmp);
return 0;
}
msr_tmp = (msr_lo >> 22) & 0x1f;
dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * 100 * 1000));
dprintk("bits 22-26 are 0x%x, speed is %u\n",
msr_tmp, (msr_tmp * 100 * 1000));
return (msr_tmp * 100 * 1000);
return msr_tmp * 100 * 1000;
}
static unsigned int pentium_core_get_frequency(void)
{
u32 fsb = 0;
u32 msr_lo, msr_tmp;
int ret;
rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp);
/* see table B-2 of 25366920.pdf */
@@ -153,12 +161,15 @@ static unsigned int pentium_core_get_frequency(void)
}
rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n",
msr_lo, msr_tmp);
msr_tmp = (msr_lo >> 22) & 0x1f;
dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * fsb));
dprintk("bits 22-26 are 0x%x, speed is %u\n",
msr_tmp, (msr_tmp * fsb));
return (msr_tmp * fsb);
ret = (msr_tmp * fsb);
return ret;
}
@@ -167,6 +178,16 @@ static unsigned int pentium4_get_frequency(void)
struct cpuinfo_x86 *c = &boot_cpu_data;
u32 msr_lo, msr_hi, mult;
unsigned int fsb = 0;
unsigned int ret;
u8 fsb_code;
/* Pentium 4 Model 0 and 1 do not have the Core Clock Frequency
* to System Bus Frequency Ratio Field in the Processor Frequency
* Configuration Register of the MSR. Therefore the current
* frequency cannot be calculated and has to be measured.
*/
if (c->x86_model < 2)
return cpu_khz;
rdmsr(0x2c, msr_lo, msr_hi);
@@ -177,62 +198,61 @@ static unsigned int pentium4_get_frequency(void)
* revision #12 in Table B-1: MSRs in the Pentium 4 and
* Intel Xeon Processors, on page B-4 and B-5.
*/
if (c->x86_model < 2)
fsb_code = (msr_lo >> 16) & 0x7;
switch (fsb_code) {
case 0:
fsb = 100 * 1000;
else {
u8 fsb_code = (msr_lo >> 16) & 0x7;
switch (fsb_code) {
case 0:
fsb = 100 * 1000;
break;
case 1:
fsb = 13333 * 10;
break;
case 2:
fsb = 200 * 1000;
break;
}
break;
case 1:
fsb = 13333 * 10;
break;
case 2:
fsb = 200 * 1000;
break;
}
if (!fsb)
printk(KERN_DEBUG "speedstep-lib: couldn't detect FSB speed. Please send an e-mail to <linux@brodo.de>\n");
printk(KERN_DEBUG PFX "couldn't detect FSB speed. "
"Please send an e-mail to <linux@brodo.de>\n");
/* Multiplier. */
mult = msr_lo >> 24;
dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", fsb, mult, (fsb * mult));
dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n",
fsb, mult, (fsb * mult));
return (fsb * mult);
ret = (fsb * mult);
return ret;
}
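The P4 path above reduces to bus clock times multiplier. A sketch of that arithmetic using the three FSB codes handled in the switch (a zero bus clock stands in for the undetected-FSB warning path):

#include <stdio.h>

/* Map the 3-bit FSB code from MSR 0x2c to a bus clock in kHz,
 * as the switch above does; unknown codes yield 0. */
static unsigned int p4_fsb_khz(unsigned char fsb_code)
{
	switch (fsb_code & 0x7) {
	case 0:
		return 100 * 1000;
	case 1:
		return 13333 * 10;	/* 133.33 MHz */
	case 2:
		return 200 * 1000;
	default:
		return 0;
	}
}

int main(void)
{
	unsigned char fsb_code = 2;	/* example: 200 MHz bus */
	unsigned int mult = 14;		/* example: MSR bits 31:24 */

	printf("%u kHz\n", p4_fsb_khz(fsb_code) * mult); /* 2800000 */
	return 0;
}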
unsigned int speedstep_get_processor_frequency(unsigned int processor)
unsigned int speedstep_get_frequency(unsigned int processor)
{
switch (processor) {
case SPEEDSTEP_PROCESSOR_PCORE:
case SPEEDSTEP_CPU_PCORE:
return pentium_core_get_frequency();
case SPEEDSTEP_PROCESSOR_PM:
case SPEEDSTEP_CPU_PM:
return pentiumM_get_frequency();
case SPEEDSTEP_PROCESSOR_P4D:
case SPEEDSTEP_PROCESSOR_P4M:
case SPEEDSTEP_CPU_P4D:
case SPEEDSTEP_CPU_P4M:
return pentium4_get_frequency();
case SPEEDSTEP_PROCESSOR_PIII_T:
case SPEEDSTEP_PROCESSOR_PIII_C:
case SPEEDSTEP_PROCESSOR_PIII_C_EARLY:
case SPEEDSTEP_CPU_PIII_T:
case SPEEDSTEP_CPU_PIII_C:
case SPEEDSTEP_CPU_PIII_C_EARLY:
return pentium3_get_frequency(processor);
default:
return 0;
};
return 0;
}
EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency);
EXPORT_SYMBOL_GPL(speedstep_get_frequency);
/*********************************************************************
* DETECT SPEEDSTEP-CAPABLE PROCESSOR *
*********************************************************************/
unsigned int speedstep_detect_processor (void)
unsigned int speedstep_detect_processor(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);
u32 ebx, msr_lo, msr_hi;
@@ -261,7 +281,7 @@ unsigned int speedstep_detect_processor (void)
* sample has ebx = 0x0f, production has 0x0e.
*/
if ((ebx == 0x0e) || (ebx == 0x0f))
return SPEEDSTEP_PROCESSOR_P4M;
return SPEEDSTEP_CPU_P4M;
break;
case 7:
/*
@@ -272,7 +292,7 @@ unsigned int speedstep_detect_processor (void)
* samples are only of B-stepping...
*/
if (ebx == 0x0e)
return SPEEDSTEP_PROCESSOR_P4M;
return SPEEDSTEP_CPU_P4M;
break;
case 9:
/*
@@ -288,10 +308,13 @@ unsigned int speedstep_detect_processor (void)
* M-P4-Ms may have either ebx=0xe or 0xf [see above]
* M-P4/533 have either ebx=0xe or 0xf. [25317607.pdf]
* also, M-P4M HTs have ebx=0x8, too
* For now, they are distinguished by the model_id string
* For now, they are distinguished by the model_id
* string
*/
if ((ebx == 0x0e) || (strstr(c->x86_model_id,"Mobile Intel(R) Pentium(R) 4") != NULL))
return SPEEDSTEP_PROCESSOR_P4M;
if ((ebx == 0x0e) ||
(strstr(c->x86_model_id,
"Mobile Intel(R) Pentium(R) 4") != NULL))
return SPEEDSTEP_CPU_P4M;
break;
default:
break;
@@ -301,7 +324,8 @@ unsigned int speedstep_detect_processor (void)
switch (c->x86_model) {
case 0x0B: /* Intel PIII [Tualatin] */
/* cpuid_ebx(1) is 0x04 for desktop PIII, 0x06 for mobile PIII-M */
/* cpuid_ebx(1) is 0x04 for desktop PIII,
* 0x06 for mobile PIII-M */
ebx = cpuid_ebx(0x00000001);
dprintk("ebx is %x\n", ebx);
@@ -313,14 +337,15 @@ unsigned int speedstep_detect_processor (void)
/* So far all PIII-M processors support SpeedStep. See
* Intel's 24540640.pdf of June 2003
*/
return SPEEDSTEP_PROCESSOR_PIII_T;
return SPEEDSTEP_CPU_PIII_T;
case 0x08: /* Intel PIII [Coppermine] */
/* all mobile PIII Coppermines have FSB 100 MHz
* ==> sort out a few desktop PIIIs. */
rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi);
dprintk("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", msr_lo, msr_hi);
dprintk("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n",
msr_lo, msr_hi);
msr_lo &= 0x00c0000;
if (msr_lo != 0x0080000)
return 0;
@@ -332,13 +357,15 @@ unsigned int speedstep_detect_processor (void)
* bit 56 or 57 is set
*/
rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi);
dprintk("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", msr_lo, msr_hi);
if ((msr_hi & (1<<18)) && (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
dprintk("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n",
msr_lo, msr_hi);
if ((msr_hi & (1<<18)) &&
(relaxed_check ? 1 : (msr_hi & (3<<24)))) {
if (c->x86_mask == 0x01) {
dprintk("early PIII version\n");
return SPEEDSTEP_PROCESSOR_PIII_C_EARLY;
return SPEEDSTEP_CPU_PIII_C_EARLY;
} else
return SPEEDSTEP_PROCESSOR_PIII_C;
return SPEEDSTEP_CPU_PIII_C;
}
default:
@@ -369,7 +396,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
dprintk("trying to determine both speeds\n");
/* get current speed */
prev_speed = speedstep_get_processor_frequency(processor);
prev_speed = speedstep_get_frequency(processor);
if (!prev_speed)
return -EIO;
@@ -379,7 +406,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
/* switch to low state */
set_state(SPEEDSTEP_LOW);
*low_speed = speedstep_get_processor_frequency(processor);
*low_speed = speedstep_get_frequency(processor);
if (!*low_speed) {
ret = -EIO;
goto out;
@@ -398,7 +425,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
if (transition_latency)
do_gettimeofday(&tv2);
*high_speed = speedstep_get_processor_frequency(processor);
*high_speed = speedstep_get_frequency(processor);
if (!*high_speed) {
ret = -EIO;
goto out;
@@ -426,9 +453,12 @@ unsigned int speedstep_get_freqs(unsigned int processor,
/* check if the latency measurement is too high or too low
* and set it to a safe value (500uSec) in that case
*/
if (*transition_latency > 10000000 || *transition_latency < 50000) {
printk (KERN_WARNING "speedstep: frequency transition measured seems out of "
"range (%u nSec), falling back to a safe one of %u nSec.\n",
if (*transition_latency > 10000000 ||
*transition_latency < 50000) {
printk(KERN_WARNING PFX "frequency transition "
"measured seems out of range (%u "
"nSec), falling back to a safe one of"
"%u nSec.\n",
*transition_latency, 500000);
*transition_latency = 500000;
}
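The clamp above, pulled out for clarity: anything outside [50 us, 10 ms] is treated as a measurement artifact and replaced by a safe 500 us default. A self-contained restatement (values in nanoseconds, function name invented here):
static unsigned int sane_transition_latency(unsigned int measured_ns)
{
	/* outside [50 us, 10 ms]: distrust the measurement */
	if (measured_ns > 10000000 || measured_ns < 50000)
		return 500000;	/* safe 500 us default */
	return measured_ns;
}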
@@ -436,15 +466,16 @@ unsigned int speedstep_get_freqs(unsigned int processor,
out:
local_irq_restore(flags);
return (ret);
return ret;
}
EXPORT_SYMBOL_GPL(speedstep_get_freqs);
#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK
module_param(relaxed_check, int, 0444);
MODULE_PARM_DESC(relaxed_check, "Don't do all checks for speedstep capability.");
MODULE_PARM_DESC(relaxed_check,
"Don't do all checks for speedstep capability.");
#endif
MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION ("Library for Intel SpeedStep 1 or 2 cpufreq drivers.");
MODULE_LICENSE ("GPL");
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("Library for Intel SpeedStep 1 or 2 cpufreq drivers.");
MODULE_LICENSE("GPL");


@@ -12,17 +12,17 @@
/* processors */
#define SPEEDSTEP_PROCESSOR_PIII_C_EARLY 0x00000001 /* Coppermine core */
#define SPEEDSTEP_PROCESSOR_PIII_C 0x00000002 /* Coppermine core */
#define SPEEDSTEP_PROCESSOR_PIII_T 0x00000003 /* Tualatin core */
#define SPEEDSTEP_PROCESSOR_P4M 0x00000004 /* P4-M */
#define SPEEDSTEP_CPU_PIII_C_EARLY 0x00000001 /* Coppermine core */
#define SPEEDSTEP_CPU_PIII_C 0x00000002 /* Coppermine core */
#define SPEEDSTEP_CPU_PIII_T 0x00000003 /* Tualatin core */
#define SPEEDSTEP_CPU_P4M 0x00000004 /* P4-M */
/* the following processors are not speedstep-capable and are not auto-detected
* in speedstep_detect_processor(). However, their speed can be detected using
* the speedstep_get_processor_frequency() call. */
#define SPEEDSTEP_PROCESSOR_PM 0xFFFFFF03 /* Pentium M */
#define SPEEDSTEP_PROCESSOR_P4D 0xFFFFFF04 /* desktop P4 */
#define SPEEDSTEP_PROCESSOR_PCORE 0xFFFFFF05 /* Core */
* the speedstep_get_frequency() call. */
#define SPEEDSTEP_CPU_PM 0xFFFFFF03 /* Pentium M */
#define SPEEDSTEP_CPU_P4D 0xFFFFFF04 /* desktop P4 */
#define SPEEDSTEP_CPU_PCORE 0xFFFFFF05 /* Core */
/* speedstep states -- only two of them */
@@ -34,7 +34,7 @@
extern unsigned int speedstep_detect_processor (void);
/* detect the current speed (in khz) of the processor */
extern unsigned int speedstep_get_processor_frequency(unsigned int processor);
extern unsigned int speedstep_get_frequency(unsigned int processor);
/* detect the low and high speeds of the processor. The callback


@@ -19,8 +19,8 @@
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <asm/ist.h>
#include <asm/io.h>
#include "speedstep-lib.h"
@@ -30,12 +30,12 @@
* If user gives it, these are used.
*
*/
static int smi_port = 0;
static int smi_cmd = 0;
static unsigned int smi_sig = 0;
static int smi_port;
static int smi_cmd;
static unsigned int smi_sig;
/* info about the processor */
static unsigned int speedstep_processor = 0;
static unsigned int speedstep_processor;
/*
* There are only two frequency states for each processor. Values
@@ -56,12 +56,13 @@ static struct cpufreq_frequency_table speedstep_freqs[] = {
* of DMA activity going on? */
#define SMI_TRIES 5
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-smi", msg)
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
"speedstep-smi", msg)
/**
* speedstep_smi_ownership
*/
static int speedstep_smi_ownership (void)
static int speedstep_smi_ownership(void)
{
u32 command, result, magic, dummy;
u32 function = GET_SPEEDSTEP_OWNER;
@@ -70,16 +71,18 @@ static int speedstep_smi_ownership (void)
command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
magic = virt_to_phys(magic_data);
dprintk("trying to obtain ownership with command %x at port %x\n", command, smi_port);
dprintk("trying to obtain ownership with command %x at port %x\n",
command, smi_port);
__asm__ __volatile__(
"push %%ebp\n"
"out %%al, (%%dx)\n"
"pop %%ebp\n"
: "=D" (result), "=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy),
"=S" (dummy)
: "=D" (result),
"=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy),
"=S" (dummy)
: "a" (command), "b" (function), "c" (0), "d" (smi_port),
"D" (0), "S" (magic)
"D" (0), "S" (magic)
: "memory"
);
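All the SMI entry points in this file share the calling convention visible above: command in EAX, function code in EBX, one OUT to the BIOS-provided port, EBP saved around the trap because the SMI handler may clobber it. A condensed sketch of that shape, 32-bit only like the driver, assuming kernel-style u32 types; the wrapper name and the zeroed ESI/EDI are illustrative:
static int smi_call(u32 command, u32 function, u32 port)
{
	u32 result, dummy;
	__asm__ __volatile__(
		"push %%ebp\n"		/* the SMI handler may clobber EBP */
		"out %%al, (%%dx)\n"	/* trap into the BIOS SMI handler */
		"pop %%ebp\n"
		: "=D" (result),
		  "=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy),
		  "=S" (dummy)
		: "a" (command), "b" (function), "c" (0), "d" (port),
		  "D" (0), "S" (0)
		: "memory"
	);
	return result;
}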
@@ -97,10 +100,10 @@ static int speedstep_smi_ownership (void)
* even hangs [cf. bugme.osdl.org # 1422] on earlier systems. Empirical testing
* shows that the latter occurs if !(ist_info.event & 0xFFFF).
*/
static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high)
static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high)
{
u32 command, result = 0, edi, high_mhz, low_mhz, dummy;
u32 state=0;
u32 state = 0;
u32 function = GET_SPEEDSTEP_FREQS;
if (!(ist_info.event & 0xFFFF)) {
@@ -110,17 +113,25 @@ static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high)
command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
dprintk("trying to determine frequencies with command %x at port %x\n", command, smi_port);
dprintk("trying to determine frequencies with command %x at port %x\n",
command, smi_port);
__asm__ __volatile__(
"push %%ebp\n"
"out %%al, (%%dx)\n"
"pop %%ebp"
: "=a" (result), "=b" (high_mhz), "=c" (low_mhz), "=d" (state), "=D" (edi), "=S" (dummy)
: "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0), "D" (0)
: "=a" (result),
"=b" (high_mhz),
"=c" (low_mhz),
"=d" (state), "=D" (edi), "=S" (dummy)
: "a" (command),
"b" (function),
"c" (state),
"d" (smi_port), "S" (0), "D" (0)
);
dprintk("result %x, low_freq %u, high_freq %u\n", result, low_mhz, high_mhz);
dprintk("result %x, low_freq %u, high_freq %u\n",
result, low_mhz, high_mhz);
/* abort if results are obviously incorrect... */
if ((high_mhz + low_mhz) < 600)
@@ -137,26 +148,30 @@ static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high)
* @state: processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
*
*/
static int speedstep_get_state (void)
static int speedstep_get_state(void)
{
u32 function=GET_SPEEDSTEP_STATE;
u32 function = GET_SPEEDSTEP_STATE;
u32 result, state, edi, command, dummy;
command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
dprintk("trying to determine current setting with command %x at port %x\n", command, smi_port);
dprintk("trying to determine current setting with command %x "
"at port %x\n", command, smi_port);
__asm__ __volatile__(
"push %%ebp\n"
"out %%al, (%%dx)\n"
"pop %%ebp\n"
: "=a" (result), "=b" (state), "=D" (edi), "=c" (dummy), "=d" (dummy), "=S" (dummy)
: "a" (command), "b" (function), "c" (0), "d" (smi_port), "S" (0), "D" (0)
: "=a" (result),
"=b" (state), "=D" (edi),
"=c" (dummy), "=d" (dummy), "=S" (dummy)
: "a" (command), "b" (function), "c" (0),
"d" (smi_port), "S" (0), "D" (0)
);
dprintk("state is %x, result is %x\n", state, result);
return (state & 1);
return state & 1;
}
@@ -165,11 +180,11 @@ static int speedstep_get_state (void)
* @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
*
*/
static void speedstep_set_state (unsigned int state)
static void speedstep_set_state(unsigned int state)
{
unsigned int result = 0, command, new_state, dummy;
unsigned long flags;
unsigned int function=SET_SPEEDSTEP_STATE;
unsigned int function = SET_SPEEDSTEP_STATE;
unsigned int retry = 0;
if (state > 0x1)
@@ -180,11 +195,14 @@ static void speedstep_set_state (unsigned int state)
command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
dprintk("trying to set frequency to state %u with command %x at port %x\n", state, command, smi_port);
dprintk("trying to set frequency to state %u "
"with command %x at port %x\n",
state, command, smi_port);
do {
if (retry) {
dprintk("retry %u, previous result %u, waiting...\n", retry, result);
dprintk("retry %u, previous result %u, waiting...\n",
retry, result);
mdelay(retry * 50);
}
retry++;
@@ -192,20 +210,26 @@ static void speedstep_set_state (unsigned int state)
"push %%ebp\n"
"out %%al, (%%dx)\n"
"pop %%ebp"
: "=b" (new_state), "=D" (result), "=c" (dummy), "=a" (dummy),
"=d" (dummy), "=S" (dummy)
: "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0), "D" (0)
: "=b" (new_state), "=D" (result),
"=c" (dummy), "=a" (dummy),
"=d" (dummy), "=S" (dummy)
: "a" (command), "b" (function), "c" (state),
"d" (smi_port), "S" (0), "D" (0)
);
} while ((new_state != state) && (retry <= SMI_TRIES));
/* enable IRQs */
local_irq_restore(flags);
if (new_state == state) {
dprintk("change to %u MHz succeeded after %u tries with result %u\n", (speedstep_freqs[new_state].frequency / 1000), retry, result);
} else {
printk(KERN_ERR "cpufreq: change to state %u failed with new_state %u and result %u\n", state, new_state, result);
}
if (new_state == state)
dprintk("change to %u MHz succeeded after %u tries "
"with result %u\n",
(speedstep_freqs[new_state].frequency / 1000),
retry, result);
else
printk(KERN_ERR "cpufreq: change to state %u "
"failed with new_state %u and result %u\n",
state, new_state, result);
return;
}
@@ -219,13 +243,14 @@ static void speedstep_set_state (unsigned int state)
*
* Sets a new CPUFreq policy/freq.
*/
static int speedstep_target (struct cpufreq_policy *policy,
static int speedstep_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
unsigned int newstate = 0;
struct cpufreq_freqs freqs;
if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
target_freq, relation, &newstate))
return -EINVAL;
freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
@@ -250,7 +275,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
* Limit must be within speedstep_low_freq and speedstep_high_freq, with
* at least one border included.
*/
static int speedstep_verify (struct cpufreq_policy *policy)
static int speedstep_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
}
@@ -259,7 +284,8 @@ static int speedstep_verify (struct cpufreq_policy *policy)
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
int result;
unsigned int speed,state;
unsigned int speed, state;
unsigned int *low, *high;
/* capability check */
if (policy->cpu != 0)
@@ -272,19 +298,23 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
}
/* detect low and high frequency */
result = speedstep_smi_get_freqs(&speedstep_freqs[SPEEDSTEP_LOW].frequency,
&speedstep_freqs[SPEEDSTEP_HIGH].frequency);
low = &speedstep_freqs[SPEEDSTEP_LOW].frequency;
high = &speedstep_freqs[SPEEDSTEP_HIGH].frequency;
result = speedstep_smi_get_freqs(low, high);
if (result) {
/* fall back to speedstep_lib.c detection mechanism: try both states out */
dprintk("could not detect low and high frequencies by SMI call.\n");
/* fall back to speedstep_lib.c detection mechanism:
* try both states out */
dprintk("could not detect low and high frequencies "
"by SMI call.\n");
result = speedstep_get_freqs(speedstep_processor,
&speedstep_freqs[SPEEDSTEP_LOW].frequency,
&speedstep_freqs[SPEEDSTEP_HIGH].frequency,
low, high,
NULL,
&speedstep_set_state);
if (result) {
dprintk("could not detect two different speeds -- aborting.\n");
dprintk("could not detect two different speeds"
" -- aborting.\n");
return result;
} else
dprintk("workaround worked.\n");
@@ -295,7 +325,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
speed = speedstep_freqs[state].frequency;
dprintk("currently at %s speed setting - %i MHz\n",
(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high",
(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
? "low" : "high",
(speed / 1000));
/* cpuinfo and default policy values */
@@ -304,7 +335,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
if (result)
return (result);
return result;
cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
@@ -321,7 +352,7 @@ static unsigned int speedstep_get(unsigned int cpu)
{
if (cpu)
return -ENODEV;
return speedstep_get_processor_frequency(speedstep_processor);
return speedstep_get_frequency(speedstep_processor);
}
@@ -335,7 +366,7 @@ static int speedstep_resume(struct cpufreq_policy *policy)
return result;
}
static struct freq_attr* speedstep_attr[] = {
static struct freq_attr *speedstep_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
@@ -364,21 +395,23 @@ static int __init speedstep_init(void)
speedstep_processor = speedstep_detect_processor();
switch (speedstep_processor) {
case SPEEDSTEP_PROCESSOR_PIII_T:
case SPEEDSTEP_PROCESSOR_PIII_C:
case SPEEDSTEP_PROCESSOR_PIII_C_EARLY:
case SPEEDSTEP_CPU_PIII_T:
case SPEEDSTEP_CPU_PIII_C:
case SPEEDSTEP_CPU_PIII_C_EARLY:
break;
default:
speedstep_processor = 0;
}
if (!speedstep_processor) {
dprintk ("No supported Intel CPU detected.\n");
dprintk("No supported Intel CPU detected.\n");
return -ENODEV;
}
dprintk("signature:0x%.8lx, command:0x%.8lx, event:0x%.8lx, perf_level:0x%.8lx.\n",
ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level);
dprintk("signature:0x%.8lx, command:0x%.8lx, "
"event:0x%.8lx, perf_level:0x%.8lx.\n",
ist_info.signature, ist_info.command,
ist_info.event, ist_info.perf_level);
/* Error if no IST-SMI BIOS or no PARM
sig= 'ISGE' aka 'Intel Speedstep Gate E' */
@@ -416,17 +449,20 @@ static void __exit speedstep_exit(void)
cpufreq_unregister_driver(&speedstep_driver);
}
module_param(smi_port, int, 0444);
module_param(smi_cmd, int, 0444);
module_param(smi_sig, uint, 0444);
module_param(smi_port, int, 0444);
module_param(smi_cmd, int, 0444);
module_param(smi_sig, uint, 0444);
MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value -- Intel's default setting is 0xb2");
MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value -- Intel's default setting is 0x82");
MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the SMI interface.");
MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value "
"-- Intel's default setting is 0xb2");
MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value "
"-- Intel's default setting is 0x82");
MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the "
"SMI interface.");
MODULE_AUTHOR ("Hiroshi Miura");
MODULE_DESCRIPTION ("Speedstep driver for IST applet SMI interface.");
MODULE_LICENSE ("GPL");
MODULE_AUTHOR("Hiroshi Miura");
MODULE_DESCRIPTION("Speedstep driver for IST applet SMI interface.");
MODULE_LICENSE("GPL");
module_init(speedstep_init);
module_exit(speedstep_exit);


@@ -61,23 +61,23 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
*/
static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
static char Cx86_model[][9] __cpuinitdata = {
static const char __cpuinitconst Cx86_model[][9] = {
"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
"M II ", "Unknown"
};
static char Cx486_name[][5] __cpuinitdata = {
static const char __cpuinitconst Cx486_name[][5] = {
"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
"SRx2", "DRx2"
};
static char Cx486S_name[][4] __cpuinitdata = {
static const char __cpuinitconst Cx486S_name[][4] = {
"S", "S2", "Se", "S2e"
};
static char Cx486D_name[][4] __cpuinitdata = {
static const char __cpuinitconst Cx486D_name[][4] = {
"DX", "DX2", "?", "?", "?", "DX4"
};
static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
static char cyrix_model_mult1[] __cpuinitdata = "12??43";
static char cyrix_model_mult2[] __cpuinitdata = "12233445";
static const char __cpuinitconst cyrix_model_mult1[] = "12??43";
static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
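The point of the conversions above (and the similar ones in the files that follow): these tables are read-only and only needed at CPU bring-up, so __cpuinitdata (writable init data) becomes __cpuinitconst (const init data that can be discarded or write-protected). A toy model of the mechanism, with an illustrative section name rather than the kernel's exact definition:
#define MY_INITCONST __attribute__((__section__(".init.rodata")))
static const char MY_INITCONST vendor_names[][8] = {
	"Cyrix", "NSC",
};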
/*
* Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
@@ -435,7 +435,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
}
}
static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
.c_vendor = "Cyrix",
.c_ident = { "CyrixInstead" },
.c_early_init = early_init_cyrix,
@@ -446,7 +446,7 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
cpu_dev_register(cyrix_cpu_dev);
static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
static const struct cpu_dev __cpuinitconst nsc_cpu_dev = {
.c_vendor = "NSC",
.c_ident = { "Geode by NSC" },
.c_init = init_nsc,


@@ -55,6 +55,11 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
c->x86_cache_alignment = 128;
#endif
/* CPUID workaround for 0F33/0F34 CPU */
if (c->x86 == 0xF && c->x86_model == 0x3
&& (c->x86_mask == 0x3 || c->x86_mask == 0x4))
c->x86_phys_bits = 36;
/*
* c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
* with P/T states and does not stop in deep C-states.
@@ -416,7 +421,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
}
#endif
static struct cpu_dev intel_cpu_dev __cpuinitdata = {
static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
.c_vendor = "Intel",
.c_ident = { "GenuineIntel" },
#ifdef CONFIG_X86_32

Просмотреть файл

@@ -32,7 +32,7 @@ struct _cache_table
};
/* all the cache descriptor types we care about (no TLB or trace cache entries) */
static struct _cache_table cache_table[] __cpuinitdata =
static const struct _cache_table __cpuinitconst cache_table[] =
{
{ 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
{ 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -159,7 +159,7 @@ struct _cpuid4_info_regs {
unsigned long can_disable;
};
#ifdef CONFIG_PCI
#if defined(CONFIG_PCI) && defined(CONFIG_SYSFS)
static struct pci_device_id k8_nb_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
@@ -206,15 +206,15 @@ union l3_cache {
unsigned val;
};
static unsigned short assocs[] __cpuinitdata = {
static const unsigned short __cpuinitconst assocs[] = {
[1] = 1, [2] = 2, [4] = 4, [6] = 8,
[8] = 16, [0xa] = 32, [0xb] = 48,
[0xc] = 64,
[0xf] = 0xffff // ??
};
static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
@@ -324,15 +324,6 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
return 0;
}
static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
struct _cpuid4_info_regs *leaf_regs =
(struct _cpuid4_info_regs *)this_leaf;
return cpuid4_cache_lookup_regs(index, leaf_regs);
}
static int __cpuinit find_num_cache_leaves(void)
{
unsigned int eax, ebx, ecx, edx;
@@ -508,6 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
return l2;
}
#ifdef CONFIG_SYSFS
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
@@ -571,6 +564,15 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
per_cpu(cpuid4_info, cpu) = NULL;
}
static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
struct _cpuid4_info_regs *leaf_regs =
(struct _cpuid4_info_regs *)this_leaf;
return cpuid4_cache_lookup_regs(index, leaf_regs);
}
static void __cpuinit get_cpu_leaves(void *_retval)
{
int j, *retval = _retval, cpu = smp_processor_id();
@@ -612,8 +614,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
return retval;
}
#ifdef CONFIG_SYSFS
#include <linux/kobject.h>
#include <linux/sysfs.h>


@@ -639,7 +639,7 @@ static void mce_init_timer(void)
if (!next_interval)
return;
setup_timer(t, mcheck_timer, smp_processor_id());
t->expires = round_jiffies_relative(jiffies + next_interval);
t->expires = round_jiffies(jiffies + next_interval);
add_timer(t);
}
@@ -990,7 +990,7 @@ static struct sysdev_attribute *mce_attributes[] = {
NULL
};
static cpumask_t mce_device_initialized = CPU_MASK_NONE;
static cpumask_var_t mce_device_initialized;
/* Per cpu sysdev init. All of the cpus still share the same ctl bank */
static __cpuinit int mce_create_device(unsigned int cpu)
@@ -1021,7 +1021,7 @@ static __cpuinit int mce_create_device(unsigned int cpu)
if (err)
goto error2;
}
cpu_set(cpu, mce_device_initialized);
cpumask_set_cpu(cpu, mce_device_initialized);
return 0;
error2:
@@ -1043,7 +1043,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
{
int i;
if (!cpu_isset(cpu, mce_device_initialized))
if (!cpumask_test_cpu(cpu, mce_device_initialized))
return;
for (i = 0; mce_attributes[i]; i++)
@@ -1053,7 +1053,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
sysdev_remove_file(&per_cpu(device_mce, cpu),
&bank_attrs[i]);
sysdev_unregister(&per_cpu(device_mce,cpu));
cpu_clear(cpu, mce_device_initialized);
cpumask_clear_cpu(cpu, mce_device_initialized);
}
/* Make sure there are no machine checks on offlined CPUs. */
@@ -1110,7 +1110,7 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
t->expires = round_jiffies_relative(jiffies + next_interval);
t->expires = round_jiffies(jiffies + next_interval);
add_timer_on(t, cpu);
smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
break;
@@ -1162,6 +1162,8 @@ static __init int mce_init_device(void)
if (!mce_available(&boot_cpu_data))
return -EIO;
alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
err = mce_init_banks();
if (err)
return err;
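Background for the mce_device_initialized conversion above: a static cpumask_t always occupies NR_CPUS bits, while cpumask_var_t lets CONFIG_CPUMASK_OFFSTACK kernels allocate only nr_cpu_ids bits at runtime. A minimal sketch of the idiom, kernel context assumed and names invented, including the allocation-failure check that the hunk above skips:
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/init.h>
static cpumask_var_t tracked_cpus;	/* illustrative name */
static int __init tracked_init(void)
{
	if (!alloc_cpumask_var(&tracked_cpus, GFP_KERNEL))
		return -ENOMEM;
	cpumask_clear(tracked_cpus);	/* alloc_cpumask_var() does not zero */
	cpumask_set_cpu(0, tracked_cpus);
	return 0;
}
static void tracked_exit(void)
{
	free_cpumask_var(tracked_cpus);
}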


@@ -92,7 +92,8 @@ struct thresh_restart {
};
/* must be called with correct cpu affinity */
static long threshold_restart_bank(void *_tr)
/* Called via smp_call_function_single() */
static void threshold_restart_bank(void *_tr)
{
struct thresh_restart *tr = _tr;
u32 mci_misc_hi, mci_misc_lo;
@@ -119,7 +120,6 @@ static long threshold_restart_bank(void *_tr)
mci_misc_hi |= MASK_COUNT_EN_HI;
wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
return 0;
}
/* cpu init entry point, called from mce.c with preempt off */
@@ -279,7 +279,7 @@ static ssize_t store_interrupt_enable(struct threshold_block *b,
tr.b = b;
tr.reset = 0;
tr.old_limit = 0;
work_on_cpu(b->cpu, threshold_restart_bank, &tr);
smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
return end - buf;
}
@@ -301,23 +301,32 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
tr.b = b;
tr.reset = 0;
work_on_cpu(b->cpu, threshold_restart_bank, &tr);
smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
return end - buf;
}
static long local_error_count(void *_b)
struct threshold_block_cross_cpu {
struct threshold_block *tb;
long retval;
};
static void local_error_count_handler(void *_tbcc)
{
struct threshold_block *b = _b;
struct threshold_block_cross_cpu *tbcc = _tbcc;
struct threshold_block *b = tbcc->tb;
u32 low, high;
rdmsr(b->address, low, high);
return (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
}
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
return sprintf(buf, "%lx\n", work_on_cpu(b->cpu, local_error_count, b));
struct threshold_block_cross_cpu tbcc = { .tb = b, };
smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
return sprintf(buf, "%lx\n", tbcc.retval);
}
static ssize_t store_error_count(struct threshold_block *b,
@@ -325,7 +334,7 @@ static ssize_t store_error_count(struct threshold_block *b,
{
struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };
work_on_cpu(b->cpu, threshold_restart_bank, &tr);
smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
return 1;
}
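The pattern behind threshold_block_cross_cpu above: work_on_cpu() returned a long, but smp_call_function_single() takes a void-returning callback, so the result has to travel back through the argument block. A generic sketch of that shape, kernel context assumed and names invented, with the wait flag set so the result is stable on return:
#include <linux/smp.h>
struct remote_result {
	int input;
	long retval;
};
static void remote_fn(void *_r)
{
	struct remote_result *r = _r;
	r->retval = r->input * 2;	/* stand-in for the real MSR read */
}
static long run_on(int cpu, int input)
{
	struct remote_result r = { .input = input, };
	smp_call_function_single(cpu, remote_fn, &r, 1);	/* 1 = wait */
	return r.retval;
}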
@@ -394,7 +403,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
return 0;
if (rdmsr_safe(address, &low, &high))
if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
return 0;
if (!(high & MASK_VALID_HI)) {
@@ -458,12 +467,11 @@ out_free:
return err;
}
static __cpuinit long local_allocate_threshold_blocks(void *_bank)
static __cpuinit long
local_allocate_threshold_blocks(int cpu, unsigned int bank)
{
unsigned int *bank = _bank;
return allocate_threshold_blocks(smp_processor_id(), *bank, 0,
MSR_IA32_MC0_MISC + *bank * 4);
return allocate_threshold_blocks(cpu, bank, 0,
MSR_IA32_MC0_MISC + bank * 4);
}
/* symlinks sibling shared banks to first core. first core owns dir/files. */
@@ -477,7 +485,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
#ifdef CONFIG_SMP
if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
i = cpumask_first(&per_cpu(cpu_core_map, cpu));
i = cpumask_first(cpu_core_mask(cpu));
/* first core not up yet */
if (cpu_data(i).cpu_core_id)
@@ -497,7 +505,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (err)
goto out;
cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
cpumask_copy(b->cpus, cpu_core_mask(cpu));
per_cpu(threshold_banks, cpu)[bank] = b;
goto out;
}
@@ -521,12 +529,12 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
#ifndef CONFIG_SMP
cpumask_setall(b->cpus);
#else
cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
cpumask_copy(b->cpus, cpu_core_mask(cpu));
#endif
per_cpu(threshold_banks, cpu)[bank] = b;
err = work_on_cpu(cpu, local_allocate_threshold_blocks, &bank);
err = local_allocate_threshold_blocks(cpu, bank);
if (err)
goto out_free;


@@ -249,7 +249,7 @@ void cmci_rediscover(int dying)
for_each_online_cpu (cpu) {
if (cpu == dying)
continue;
if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)))
if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
continue;
/* Recheck banks in case CPUs don't all have the same */
if (cmci_supported(&banks))
@@ -270,7 +270,7 @@ void cmci_reenable(void)
cmci_discover(banks, 0);
}
static __cpuinit void intel_init_cmci(void)
static void intel_init_cmci(void)
{
int banks;

Просмотреть файл

@@ -1,3 +1,3 @@
obj-y := main.o if.o generic.o state.o
obj-y := main.o if.o generic.o state.o cleanup.o
obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o

arch/x86/kernel/cpu/mtrr/cleanup.c (new file, 1101 lines)
Diff not shown because of its large size.


@@ -33,13 +33,31 @@ u64 mtrr_tom2;
struct mtrr_state_type mtrr_state = {};
EXPORT_SYMBOL_GPL(mtrr_state);
static int __initdata mtrr_show;
static int __init mtrr_debug(char *opt)
/**
* BIOS is expected to clear MtrrFixDramModEn bit, see for example
* "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
* Opteron Processors" (26094 Rev. 3.30 February 2006), section
* "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
* to 1 during BIOS initialization of the fixed MTRRs, then cleared to
* 0 for operation."
*/
static inline void k8_check_syscfg_dram_mod_en(void)
{
mtrr_show = 1;
return 0;
u32 lo, hi;
if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
(boot_cpu_data.x86 >= 0x0f)))
return;
rdmsr(MSR_K8_SYSCFG, lo, hi);
if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
" not cleared by BIOS, clearing this bit\n",
smp_processor_id());
lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
}
}
early_param("mtrr.show", mtrr_debug);
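k8_check_syscfg_dram_mod_en() above is an instance of a common read-check-clear MSR pattern; reduced to its skeleton (kernel context assumed, names illustrative, without the driver's vendor check and FW_WARN logging):
#include <asm/msr.h>
static void clear_msr_bit_if_set(unsigned int msr, u32 bit)
{
	u32 lo, hi;
	rdmsr(msr, lo, hi);
	if (lo & bit) {
		lo &= ~bit;
		wrmsr(msr, lo, hi);
	}
}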
/*
* Returns the effective MTRR type for the region
@@ -174,6 +192,8 @@ get_fixed_ranges(mtrr_type * frs)
unsigned int *p = (unsigned int *) frs;
int i;
k8_check_syscfg_dram_mod_en();
rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
for (i = 0; i < 2; i++)
@@ -188,18 +208,94 @@ void mtrr_save_fixed_ranges(void *info)
get_fixed_ranges(mtrr_state.fixed_ranges);
}
static void print_fixed(unsigned base, unsigned step, const mtrr_type*types)
static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;
static void __init print_fixed_last(void)
{
if (!last_fixed_end)
return;
printk(KERN_DEBUG " %05X-%05X %s\n", last_fixed_start,
last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
last_fixed_end = 0;
}
static void __init update_fixed_last(unsigned base, unsigned end,
mtrr_type type)
{
last_fixed_start = base;
last_fixed_end = end;
last_fixed_type = type;
}
static void __init print_fixed(unsigned base, unsigned step,
const mtrr_type *types)
{
unsigned i;
for (i = 0; i < 8; ++i, ++types, base += step)
printk(KERN_INFO "MTRR %05X-%05X %s\n",
base, base + step - 1, mtrr_attrib_to_str(*types));
for (i = 0; i < 8; ++i, ++types, base += step) {
if (last_fixed_end == 0) {
update_fixed_last(base, base + step, *types);
continue;
}
if (last_fixed_end == base && last_fixed_type == *types) {
last_fixed_end = base + step;
continue;
}
/* new segments: gap or different type */
print_fixed_last();
update_fixed_last(base, base + step, *types);
}
}
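print_fixed_last()/update_fixed_last() above implement a small run-length merge: consecutive fixed ranges with the same memory type are folded into a single printed line. The same idea in a self-contained sketch (plain user-space C, hypothetical data layout):
#include <stdio.h>
struct range { unsigned base, end, type; };
static void print_merged(const struct range *r, int n)	/* assumes n >= 1 */
{
	struct range cur = r[0];
	int i;
	for (i = 1; i < n; i++) {
		if (r[i].base == cur.end && r[i].type == cur.type) {
			cur.end = r[i].end;	/* adjacent, same type: extend */
			continue;
		}
		printf(" %05X-%05X type %u\n", cur.base, cur.end - 1, cur.type);
		cur = r[i];
	}
	printf(" %05X-%05X type %u\n", cur.base, cur.end - 1, cur.type);
}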
static void prepare_set(void);
static void post_set(void);
static void __init print_mtrr_state(void)
{
unsigned int i;
int high_width;
printk(KERN_DEBUG "MTRR default type: %s\n",
mtrr_attrib_to_str(mtrr_state.def_type));
if (mtrr_state.have_fixed) {
printk(KERN_DEBUG "MTRR fixed ranges %sabled:\n",
mtrr_state.enabled & 1 ? "en" : "dis");
print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
for (i = 0; i < 2; ++i)
print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
for (i = 0; i < 8; ++i)
print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
/* tail */
print_fixed_last();
}
printk(KERN_DEBUG "MTRR variable ranges %sabled:\n",
mtrr_state.enabled & 2 ? "en" : "dis");
high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
for (i = 0; i < num_var_ranges; ++i) {
if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n",
i,
high_width,
mtrr_state.var_ranges[i].base_hi,
mtrr_state.var_ranges[i].base_lo >> 12,
high_width,
mtrr_state.var_ranges[i].mask_hi,
mtrr_state.var_ranges[i].mask_lo >> 12,
mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
else
printk(KERN_DEBUG " %u disabled\n", i);
}
if (mtrr_tom2) {
printk(KERN_DEBUG "TOM2: %016llx aka %lldM\n",
mtrr_tom2, mtrr_tom2>>20);
}
}
/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
@@ -231,41 +327,9 @@ void __init get_mtrr_state(void)
mtrr_tom2 |= low;
mtrr_tom2 &= 0xffffff800000ULL;
}
if (mtrr_show) {
int high_width;
printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
if (mtrr_state.have_fixed) {
printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
mtrr_state.enabled & 1 ? "en" : "dis");
print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
for (i = 0; i < 2; ++i)
print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
for (i = 0; i < 8; ++i)
print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
}
printk(KERN_INFO "MTRR variable ranges %sabled:\n",
mtrr_state.enabled & 2 ? "en" : "dis");
high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
for (i = 0; i < num_var_ranges; ++i) {
if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
i,
high_width,
mtrr_state.var_ranges[i].base_hi,
mtrr_state.var_ranges[i].base_lo >> 12,
high_width,
mtrr_state.var_ranges[i].mask_hi,
mtrr_state.var_ranges[i].mask_lo >> 12,
mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
else
printk(KERN_INFO "MTRR %u disabled\n", i);
}
if (mtrr_tom2) {
printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
mtrr_tom2, mtrr_tom2>>20);
}
}
print_mtrr_state();
mtrr_state_set = 1;
/* PAT setup for BP. We need to go through sync steps here */
@@ -307,28 +371,11 @@ void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
smp_processor_id(), msr, a, b);
}
/**
* Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
* see AMD publication no. 24593, chapter 3.2.1 for more information
*/
static inline void k8_enable_fixed_iorrs(void)
{
unsigned lo, hi;
rdmsr(MSR_K8_SYSCFG, lo, hi);
mtrr_wrmsr(MSR_K8_SYSCFG, lo
| K8_MTRRFIXRANGE_DRAM_ENABLE
| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
}
/**
* set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
* @msr: MSR address of the MTRR which should be checked and updated
* @changed: pointer which indicates whether the MTRR needed to be changed
* @msrwords: pointer to the MSR values which the MSR should have
*
* If K8 extentions are wanted, update the K8 SYSCFG MSR also.
* See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
*/
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
@@ -337,10 +384,6 @@ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
rdmsr(msr, lo, hi);
if (lo != msrwords[0] || hi != msrwords[1]) {
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
(boot_cpu_data.x86 >= 0x0f && boot_cpu_data.x86 <= 0x11) &&
((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
k8_enable_fixed_iorrs();
mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
*changed = true;
}
@@ -376,22 +419,31 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
{
unsigned int mask_lo, mask_hi, base_lo, base_hi;
unsigned int tmp, hi;
int cpu;
/*
* get_mtrr doesn't need to update mtrr_state, also it could be called
* from any cpu, so try to print it out directly.
*/
cpu = get_cpu();
rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
if ((mask_lo & 0x800) == 0) {
/* Invalid (i.e. free) range */
*base = 0;
*size = 0;
*type = 0;
return;
goto out_put_cpu;
}
rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
/* Work out the shifted address mask. */
/* Work out the shifted address mask: */
tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
mask_lo = size_or_mask | tmp;
/* Expand tmp with high bits to all 1s*/
/* Expand tmp with high bits to all 1s: */
hi = fls(tmp);
if (hi > 0) {
tmp |= ~((1<<(hi - 1)) - 1);
@@ -402,11 +454,16 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
}
}
/* This works correctly if size is a power of two, i.e. a
contiguous range. */
/*
* This works correctly if size is a power of two, i.e. a
* contiguous range:
*/
*size = -mask_lo;
*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
*type = base_lo & 0xff;
out_put_cpu:
put_cpu();
}
/**
@@ -419,6 +476,8 @@ static int set_fixed_ranges(mtrr_type * frs)
bool changed = false;
int block=-1, range;
k8_check_syscfg_dram_mod_en();
while (fixed_range_blocks[++block].ranges)
for (range=0; range < fixed_range_blocks[block].ranges; range++)
set_fixed_range(fixed_range_blocks[block].base_msr + range,


@@ -377,10 +377,6 @@ static const struct file_operations mtrr_fops = {
.release = mtrr_close,
};
static struct proc_dir_entry *proc_root_mtrr;
static int mtrr_seq_show(struct seq_file *seq, void *offset)
{
char factor;
@@ -423,11 +419,7 @@ static int __init mtrr_if_init(void)
(!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
return -ENODEV;
proc_root_mtrr =
proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops);
if (proc_root_mtrr)
proc_root_mtrr->owner = THIS_MODULE;
proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops);
return 0;
}

Diff not shown because of its large size.


@@ -79,6 +79,7 @@ extern struct mtrr_ops * mtrr_if;
extern unsigned int num_var_ranges;
extern u64 mtrr_tom2;
extern struct mtrr_state_type mtrr_state;
void mtrr_state_warn(void);
const char *mtrr_attrib_to_str(int x);
@@ -88,3 +89,6 @@ void mtrr_wrmsr(unsigned, unsigned, unsigned);
int amd_init_mtrr(void);
int cyrix_init_mtrr(void);
int centaur_init_mtrr(void);
extern int changed_by_mtrr_cleanup;
extern int mtrr_cleanup(unsigned address_bits);


@@ -14,7 +14,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
if (c->x86_max_cores * smp_num_siblings > 1) {
seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
seq_printf(m, "siblings\t: %d\n",
cpus_weight(per_cpu(cpu_core_map, cpu)));
cpumask_weight(cpu_sibling_mask(cpu)));
seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
seq_printf(m, "apicid\t\t: %d\n", c->apicid);
@@ -143,9 +143,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
static void *c_start(struct seq_file *m, loff_t *pos)
{
if (*pos == 0) /* just in case, cpu 0 is not the first */
*pos = first_cpu(cpu_online_map);
*pos = cpumask_first(cpu_online_mask);
else
*pos = next_cpu_nr(*pos - 1, cpu_online_map);
*pos = cpumask_next(*pos - 1, cpu_online_mask);
if ((*pos) < nr_cpu_ids)
return &cpu_data(*pos);
return NULL;


@@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
#endif
}
static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = {
.c_vendor = "Transmeta",
.c_ident = { "GenuineTMx86", "TransmetaCPU" },
.c_early_init = early_init_transmeta,


@@ -8,7 +8,7 @@
* so no special init takes place.
*/
static struct cpu_dev umc_cpu_dev __cpuinitdata = {
static const struct cpu_dev __cpuinitconst umc_cpu_dev = {
.c_vendor = "UMC",
.c_ident = { "UMC UMC UMC" },
.c_models = {


@@ -110,19 +110,50 @@ int __init e820_all_mapped(u64 start, u64 end, unsigned type)
/*
* Add a memory region to the kernel e820 map.
*/
void __init e820_add_region(u64 start, u64 size, int type)
static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
int type)
{
int x = e820.nr_map;
int x = e820x->nr_map;
if (x == ARRAY_SIZE(e820.map)) {
if (x == ARRAY_SIZE(e820x->map)) {
printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
return;
}
e820.map[x].addr = start;
e820.map[x].size = size;
e820.map[x].type = type;
e820.nr_map++;
e820x->map[x].addr = start;
e820x->map[x].size = size;
e820x->map[x].type = type;
e820x->nr_map++;
}
void __init e820_add_region(u64 start, u64 size, int type)
{
__e820_add_region(&e820, start, size, type);
}
static void __init e820_print_type(u32 type)
{
switch (type) {
case E820_RAM:
case E820_RESERVED_KERN:
printk(KERN_CONT "(usable)");
break;
case E820_RESERVED:
printk(KERN_CONT "(reserved)");
break;
case E820_ACPI:
printk(KERN_CONT "(ACPI data)");
break;
case E820_NVS:
printk(KERN_CONT "(ACPI NVS)");
break;
case E820_UNUSABLE:
printk(KERN_CONT "(unusable)");
break;
default:
printk(KERN_CONT "type %u", type);
break;
}
}
void __init e820_print_map(char *who)
@@ -134,27 +165,8 @@ void __init e820_print_map(char *who)
(unsigned long long) e820.map[i].addr,
(unsigned long long)
(e820.map[i].addr + e820.map[i].size));
switch (e820.map[i].type) {
case E820_RAM:
case E820_RESERVED_KERN:
printk(KERN_CONT "(usable)\n");
break;
case E820_RESERVED:
printk(KERN_CONT "(reserved)\n");
break;
case E820_ACPI:
printk(KERN_CONT "(ACPI data)\n");
break;
case E820_NVS:
printk(KERN_CONT "(ACPI NVS)\n");
break;
case E820_UNUSABLE:
printk("(unusable)\n");
break;
default:
printk(KERN_CONT "type %u\n", e820.map[i].type);
break;
}
e820_print_type(e820.map[i].type);
printk(KERN_CONT "\n");
}
}
@@ -221,7 +233,7 @@ void __init e820_print_map(char *who)
*/
int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
int *pnr_map)
u32 *pnr_map)
{
struct change_member {
struct e820entry *pbios; /* pointer to original bios entry */
@@ -417,11 +429,12 @@ static int __init append_e820_map(struct e820entry *biosmap, int nr_map)
return __append_e820_map(biosmap, nr_map);
}
static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
static u64 __init __e820_update_range(struct e820map *e820x, u64 start,
u64 size, unsigned old_type,
unsigned new_type)
{
int i;
u64 end;
unsigned int i;
u64 real_updated_size = 0;
BUG_ON(old_type == new_type);
@@ -429,27 +442,55 @@ static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
if (size > (ULLONG_MAX - start))
size = ULLONG_MAX - start;
for (i = 0; i < e820.nr_map; i++) {
end = start + size;
printk(KERN_DEBUG "e820 update range: %016Lx - %016Lx ",
(unsigned long long) start,
(unsigned long long) end);
e820_print_type(old_type);
printk(KERN_CONT " ==> ");
e820_print_type(new_type);
printk(KERN_CONT "\n");
for (i = 0; i < e820x->nr_map; i++) {
struct e820entry *ei = &e820x->map[i];
u64 final_start, final_end;
u64 ei_end;
if (ei->type != old_type)
continue;
/* totally covered? */
if (ei->addr >= start &&
(ei->addr + ei->size) <= (start + size)) {
ei_end = ei->addr + ei->size;
/* totally covered by new range? */
if (ei->addr >= start && ei_end <= end) {
ei->type = new_type;
real_updated_size += ei->size;
continue;
}
/* new range is totally covered? */
if (ei->addr < start && ei_end > end) {
__e820_add_region(e820x, start, size, new_type);
__e820_add_region(e820x, end, ei_end - end, ei->type);
ei->size = start - ei->addr;
real_updated_size += size;
continue;
}
/* partially covered */
final_start = max(start, ei->addr);
final_end = min(start + size, ei->addr + ei->size);
final_end = min(end, ei_end);
if (final_start >= final_end)
continue;
e820_add_region(final_start, final_end - final_start,
new_type);
__e820_add_region(e820x, final_start, final_end - final_start,
new_type);
real_updated_size += final_end - final_start;
/*
* the remaining part of the entry could be its head or its tail,
* so the size must be updated first
*/
ei->size -= final_end - final_start;
if (ei->addr < final_start)
continue;
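__e820_update_range() above distinguishes four interval relations between an existing entry and the range being retyped; the new "new range is totally covered" branch is what was missing before (it splits the old entry into a head that keeps its type, the retyped middle, and a re-added tail). The case analysis in isolation, as a hedged sketch:
/* [a,b) is an existing entry, [s,e) the range being retyped */
enum overlap { DISJOINT, COVERED, SPLIT, PARTIAL };
static enum overlap classify(unsigned long long a, unsigned long long b,
			     unsigned long long s, unsigned long long e)
{
	if (b <= s || e <= a)
		return DISJOINT;	/* entry untouched */
	if (a >= s && b <= e)
		return COVERED;		/* entry fully retyped */
	if (a < s && b > e)
		return SPLIT;		/* head keeps old type, tail re-added */
	return PARTIAL;			/* one end trimmed */
}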
@@ -461,13 +502,13 @@ static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
u64 __init e820_update_range(u64 start, u64 size, unsigned old_type,
unsigned new_type)
{
return e820_update_range_map(&e820, start, size, old_type, new_type);
return __e820_update_range(&e820, start, size, old_type, new_type);
}
static u64 __init e820_update_range_saved(u64 start, u64 size,
unsigned old_type, unsigned new_type)
{
return e820_update_range_map(&e820_saved, start, size, old_type,
return __e820_update_range(&e820_saved, start, size, old_type,
new_type);
}
@@ -511,7 +552,7 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
void __init update_e820(void)
{
int nr_map;
u32 nr_map;
nr_map = e820.nr_map;
if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
@@ -522,7 +563,7 @@ void __init update_e820(void)
}
static void __init update_e820_saved(void)
{
int nr_map;
u32 nr_map;
nr_map = e820_saved.nr_map;
if (sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
@@ -1020,8 +1061,8 @@ u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
continue;
return addr;
}
return -1UL;
return -1ULL;
}
/*
@@ -1034,13 +1075,22 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
u64 start;
start = startt;
while (size < sizet)
while (size < sizet && (start + 1))
start = find_e820_area_size(start, &size, align);
if (size < sizet)
return 0;
#ifdef CONFIG_X86_32
if (start >= MAXMEM)
return 0;
if (start + size > MAXMEM)
size = MAXMEM - start;
#endif
addr = round_down(start + size - sizet, align);
if (addr < start)
return 0;
e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
printk(KERN_INFO "update e820 for early_reserve_e820\n");
@@ -1253,7 +1303,7 @@ early_param("memmap", parse_memmap_opt);
void __init finish_e820_parsing(void)
{
if (userdef) {
int nr = e820.nr_map;
u32 nr = e820.nr_map;
if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
early_panic("Invalid user supplied memory map");
@@ -1336,7 +1386,7 @@ void __init e820_reserve_resources_late(void)
char *__init default_machine_specific_memory_setup(void)
{
char *who = "BIOS-e820";
int new_nr;
u32 new_nr;
/*
* Try to copy the BIOS-supplied E820-map.
*


@@ -250,7 +250,7 @@ static int dbgp_wait_until_complete(void)
return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl);
}
static void dbgp_mdelay(int ms)
static void __init dbgp_mdelay(int ms)
{
int i;
@@ -311,7 +311,7 @@ static void dbgp_set_data(const void *buf, int size)
writel(hi, &ehci_debug->data47);
}
static void dbgp_get_data(void *buf, int size)
static void __init dbgp_get_data(void *buf, int size)
{
unsigned char *bytes = buf;
u32 lo, hi;
@@ -355,7 +355,7 @@ static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
return ret;
}
static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
static int __init dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
int size)
{
u32 pids, addr, ctrl;
@@ -386,8 +386,8 @@ static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
return ret;
}
static int dbgp_control_msg(unsigned devnum, int requesttype, int request,
int value, int index, void *data, int size)
static int __init dbgp_control_msg(unsigned devnum, int requesttype,
int request, int value, int index, void *data, int size)
{
u32 pids, addr, ctrl;
struct usb_ctrlrequest req;
@@ -489,7 +489,7 @@ static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc)
return 0;
}
static int ehci_reset_port(int port)
static int __init ehci_reset_port(int port)
{
u32 portsc;
u32 delay_time, delay;
@@ -532,7 +532,7 @@ static int ehci_reset_port(int port)
return -EBUSY;
}
static int ehci_wait_for_port(int port)
static int __init ehci_wait_for_port(int port)
{
u32 status;
int ret, reps;
@@ -557,13 +557,13 @@ static inline void dbgp_printk(const char *fmt, ...) { }
typedef void (*set_debug_port_t)(int port);
static void default_set_debug_port(int port)
static void __init default_set_debug_port(int port)
{
}
static set_debug_port_t set_debug_port = default_set_debug_port;
static set_debug_port_t __initdata set_debug_port = default_set_debug_port;
static void nvidia_set_debug_port(int port)
static void __init nvidia_set_debug_port(int port)
{
u32 dword;
dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,


@@ -442,8 +442,7 @@ sysenter_past_esp:
GET_THREAD_INFO(%ebp)
/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
jnz sysenter_audit
sysenter_do_call:
cmpl $(nr_syscalls), %eax
@@ -454,7 +453,7 @@ sysenter_do_call:
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testw $_TIF_ALLWORK_MASK, %cx
testl $_TIF_ALLWORK_MASK, %ecx
jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
@@ -468,7 +467,7 @@ sysenter_exit:
#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
jnz syscall_trace_entry
addl $4,%esp
CFI_ADJUST_CFA_OFFSET -4
@@ -485,7 +484,7 @@ sysenter_audit:
jmp sysenter_do_call
sysexit_audit:
testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
jne syscall_exit_work
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY)
@@ -498,7 +497,7 @@ sysexit_audit:
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
jne syscall_exit_work
movl PT_EAX(%esp),%eax /* reload syscall return value */
jmp sysenter_exit
@@ -523,8 +522,7 @@ ENTRY(system_call)
SAVE_ALL
GET_THREAD_INFO(%ebp)
# system call tracing in operation / emulation
/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
jnz syscall_trace_entry
cmpl $(nr_syscalls), %eax
jae syscall_badsys
@@ -538,7 +536,7 @@ syscall_exit:
# between sampling and the iret
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testw $_TIF_ALLWORK_MASK, %cx # current->work
testl $_TIF_ALLWORK_MASK, %ecx # current->work
jne syscall_exit_work
restore_all:
@@ -673,7 +671,7 @@ END(syscall_trace_entry)
# perform syscall exit tracing
ALIGN
syscall_exit_work:
testb $_TIF_WORK_SYSCALL_EXIT, %cl
testl $_TIF_WORK_SYSCALL_EXIT, %ecx
jz work_pending
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call
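The reason for the testw-to-testl conversions in this file: a 16-bit test against %cx can only observe flag bits 0-15, so any _TIF_ flag allocated at bit 16 or above would silently be ignored. A tiny demonstration of the difference (the flag value is hypothetical):
#include <stdint.h>
#include <stdio.h>
#define TIF_EXAMPLE (1u << 17)	/* hypothetical flag above bit 15 */
int main(void)
{
	uint32_t flags = TIF_EXAMPLE;
	printf("16-bit test sees it: %s\n",	/* prints "no" */
			((uint16_t)flags & TIF_EXAMPLE) ? "yes" : "no");
	printf("32-bit test sees it: %s\n",	/* prints "yes" */
			(flags & TIF_EXAMPLE) ? "yes" : "no");
	return 0;
}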


@@ -368,6 +368,7 @@ ENTRY(save_rest)
END(save_rest)
/* save complete stack frame */
.pushsection .kprobes.text, "ax"
ENTRY(save_paranoid)
XCPT_FRAME 1 RDI+8
cld
@@ -396,6 +397,7 @@ ENTRY(save_paranoid)
1: ret
CFI_ENDPROC
END(save_paranoid)
.popsection
/*
* A newly forked process directly context switches into this address.
@@ -416,7 +418,6 @@ ENTRY(ret_from_fork)
GET_THREAD_INFO(%rcx)
CFI_REMEMBER_STATE
RESTORE_REST
testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
@@ -428,7 +429,6 @@ ENTRY(ret_from_fork)
RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
jmp ret_from_sys_call # go to the SYSRET fastpath
CFI_RESTORE_STATE
CFI_ENDPROC
END(ret_from_fork)


@@ -79,11 +79,11 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
*
* 1) Put the instruction pointer into the IP buffer
* and the new code into the "code" buffer.
* 2) Set a flag that says we are modifying code
* 3) Wait for any running NMIs to finish.
* 4) Write the code
* 5) clear the flag.
* 6) Wait for any running NMIs to finish.
* 2) Wait for any running NMIs to finish and set a flag that says
* we are modifying code; this is done in one atomic operation.
* 3) Write the code
* 4) Clear the flag.
* 5) Wait for any running NMIs to finish.
*
* If an NMI is executed, the first thing it does is to call
* "ftrace_nmi_enter". This will check if the flag is set to write
@@ -95,9 +95,9 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
* are the same as what exists.
*/
#define MOD_CODE_WRITE_FLAG (1 << 31) /* set when NMI should do the write */
static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status; /* holds return value of text write */
static int mod_code_write; /* set when NMI should do the write */
static void *mod_code_ip; /* holds the IP to write to */
static void *mod_code_newcode; /* holds the text to write to the IP */
@@ -114,6 +114,20 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
return r;
}
static void clear_mod_flag(void)
{
int old = atomic_read(&nmi_running);
for (;;) {
int new = old & ~MOD_CODE_WRITE_FLAG;
if (old == new)
break;
old = atomic_cmpxchg(&nmi_running, old, new);
}
}
static void ftrace_mod_code(void)
{
/*
@@ -127,27 +141,39 @@ static void ftrace_mod_code(void)
/* if we fail, then kill any new writers */
if (mod_code_status)
mod_code_write = 0;
clear_mod_flag();
}
void ftrace_nmi_enter(void)
{
atomic_inc(&nmi_running);
/* Must have nmi_running seen before reading write flag */
smp_mb();
if (mod_code_write) {
if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
smp_rmb();
ftrace_mod_code();
atomic_inc(&nmi_update_count);
}
/* Must have previous changes seen before executions */
smp_mb();
}
void ftrace_nmi_exit(void)
{
/* Finish all executions before clearing nmi_running */
smp_wmb();
smp_mb();
atomic_dec(&nmi_running);
}
static void wait_for_nmi_and_set_mod_flag(void)
{
if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
return;
do {
cpu_relax();
} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
nmi_wait_count++;
}
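The conversion above packs the writer flag into bit 31 of nmi_running itself, so "no NMIs in flight and no writer" is a single compare-and-swap against zero, and an entering NMI learns about a pending writer from the same atomic increment that counts it. A C11-atomics rendering of the protocol; the kernel code uses atomic_t with atomic_cmpxchg()/atomic_inc_return(), so this is a hedged model rather than the driver's code:
#include <stdatomic.h>
#include <stdbool.h>
#define WRITE_FLAG (1u << 31)	/* mirrors MOD_CODE_WRITE_FLAG */
static _Atomic unsigned int nmi_count;	/* low bits: NMIs in flight */
static bool try_claim_writer(void)
{
	unsigned int expected = 0;
	/* succeeds only when no NMI is inside the handler */
	return atomic_compare_exchange_strong(&nmi_count, &expected,
			WRITE_FLAG);
}
static bool nmi_enter_sees_writer(void)
{
	/* count ourselves and learn about a writer in one atomic op */
	return atomic_fetch_add(&nmi_count, 1) & WRITE_FLAG;
}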
static void wait_for_nmi(void)
{
if (!atomic_read(&nmi_running))
@@ -167,14 +193,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
mod_code_newcode = new_code;
/* The buffers need to be visible before we let NMIs write them */
smp_wmb();
mod_code_write = 1;
/* Make sure write bit is visible before we wait on NMIs */
smp_mb();
wait_for_nmi();
wait_for_nmi_and_set_mod_flag();
/* Make sure all running NMIs have finished before we write the code */
smp_mb();
@@ -182,13 +203,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
ftrace_mod_code();
/* Make sure the write happens before clearing the bit */
smp_wmb();
mod_code_write = 0;
/* make sure NMIs see the cleared bit */
smp_mb();
clear_mod_flag();
wait_for_nmi();
return mod_code_status;
@@ -393,7 +410,6 @@ int ftrace_disable_ftrace_graph_caller(void)
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
unsigned long old;
unsigned long long calltime;
int faulted;
struct ftrace_graph_ent trace;
unsigned long return_hooker = (unsigned long)
@@ -436,10 +452,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
return;
}
calltime = trace_clock_local();
if (ftrace_push_return_trace(old, calltime,
self_addr, &trace.depth) == -EBUSY) {
if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
*parent = old;
return;
}
@@ -453,3 +466,66 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_FTRACE_SYSCALLS
extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned long *sys_call_table;
static struct syscall_metadata **syscalls_metadata;
static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
{
struct syscall_metadata *start;
struct syscall_metadata *stop;
char str[KSYM_SYMBOL_LEN];
start = (struct syscall_metadata *)__start_syscalls_metadata;
stop = (struct syscall_metadata *)__stop_syscalls_metadata;
kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
for ( ; start < stop; start++) {
if (start->name && !strcmp(start->name, str))
return start;
}
return NULL;
}
struct syscall_metadata *syscall_nr_to_meta(int nr)
{
if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
return NULL;
return syscalls_metadata[nr];
}
void arch_init_ftrace_syscalls(void)
{
int i;
struct syscall_metadata *meta;
unsigned long **psys_syscall_table = &sys_call_table;
static atomic_t refs;
if (atomic_inc_return(&refs) != 1)
goto end;
syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
FTRACE_SYSCALL_MAX, GFP_KERNEL);
if (!syscalls_metadata) {
WARN_ON(1);
return;
}
for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
meta = find_syscall_meta(psys_syscall_table[i]);
syscalls_metadata[i] = meta;
}
return;
/* Paranoid: avoid overflow */
end:
atomic_dec(&refs);
}
#endif
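
A hedged usage sketch of the lookup this table enables: given a syscall number, a tracer can recover the metadata collected above. Only the ->name member is used here, since that is the only field the matching above relies on; the pr_debug() consumer context is our assumption, not driver code.

/* Sketch: consumer side of syscalls_metadata, after arch_init_ftrace_syscalls(). */
static void report_syscall(int nr)
{
	struct syscall_metadata *meta = syscall_nr_to_meta(nr);

	if (!meta)		/* nr out of range, or no metadata matched */
		return;
	pr_debug("syscall %d is %s\n", nr, meta->name);
}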


@@ -18,7 +18,7 @@ void __init i386_start_kernel(void)
{
reserve_trampoline_memory();
reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS");
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
#ifdef CONFIG_BLK_DEV_INITRD
/* Reserve INITRD */
@@ -29,9 +29,6 @@ void __init i386_start_kernel(void)
reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
}
#endif
reserve_early(init_pg_tables_start, init_pg_tables_end,
"INIT_PG_TABLE");
reserve_ebda_region();
/*


@@ -100,7 +100,7 @@ void __init x86_64_start_reservations(char *real_mode_data)
reserve_trampoline_memory();
reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS");
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
#ifdef CONFIG_BLK_DEV_INITRD
/* Reserve INITRD */


@@ -38,42 +38,40 @@
#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
/*
* This is how much memory *in addition to the memory covered up to
* and including _end* we need mapped initially.
* This is how much memory in addition to the memory covered up to
* and including _end we need mapped initially.
* We need:
* - one bit for each possible page, but only in low memory, which means
* 2^32/4096/8 = 128K worst case (4G/4G split.)
* - enough space to map all low memory, which means
* (2^32/4096) / 1024 pages (worst case, non PAE)
* (2^32/4096) / 512 + 4 pages (worst case for PAE)
* - a few pages for allocator use before the kernel pagetable has
* been set up
* (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
* (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
*
* Modulo rounding, each megabyte assigned here requires a kilobyte of
* memory, which is currently unreclaimed.
*
* This should be a multiple of a page.
*
* KERNEL_IMAGE_SIZE should be greater than pa(_end)
* and smaller than max_low_pfn, otherwise we will waste some page table entries
*/
LOW_PAGES = 1<<(32-PAGE_SHIFT_asm)
/*
* To preserve the DMA pool in PAGEALLOC kernels, we'll allocate
* pagetables from above the 16MB DMA limit, so we'll have to set
* up pagetables 16MB more (worst-case):
*/
#ifdef CONFIG_DEBUG_PAGEALLOC
LOW_PAGES = LOW_PAGES + 0x1000000
#endif
#if PTRS_PER_PMD > 1
PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PMD) + PTRS_PER_PGD
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PGD)
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif
BOOTBITMAP_SIZE = LOW_PAGES / 8
ALLOCATOR_SLOP = 4
INIT_MAP_BEYOND_END = BOOTBITMAP_SIZE + (PAGE_TABLE_SIZE + ALLOCATOR_SLOP)*PAGE_SIZE_asm
/* Enough space to fit pagetables for the low memory linear map */
MAPPING_BEYOND_END = \
PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT
/*
* Worst-case size of the kernel mapping we need to make:
* the worst-case size of the kernel itself, plus the extra we need
* to map for the linear map.
*/
KERNEL_PAGES = (KERNEL_IMAGE_SIZE + MAPPING_BEYOND_END)>>PAGE_SHIFT
INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
RESERVE_BRK(pagetables, INIT_MAP_SIZE)
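
Worked example of the new sizing (assuming the default 3G/1G split, a non-PAE build, PAGE_SHIFT = 12 and KERNEL_IMAGE_SIZE = 512 MB; all values are illustrative):

	low memory to map    = (1 << 32) - 0xC0000000        = 1 GB = 262144 pages
	MAPPING_BEYOND_END   = (262144 / 1024) << PAGE_SHIFT = 256 pages = 1 MB
	KERNEL_PAGES         = (512 MB + 1 MB) >> PAGE_SHIFT = 131328 pages
	INIT_MAP_SIZE        = (131328 / 1024) * 4096        = 128 pages = 512 KB

So roughly half a megabyte of brk space replaces the old fixed INIT_MAP_BEYOND_END allocation beyond _end.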
/*
* 32-bit kernel entrypoint; only used by the boot CPU. On entry,
@@ -166,10 +164,10 @@ num_subarch_entries = (. - subarch_entries) / 4
/*
* Initialize page tables. This creates a PDE and a set of page
* tables, which are located immediately beyond _end. The variable
* init_pg_tables_end is set up to point to the first "safe" location.
* tables, which are located immediately beyond __brk_base. The variable
* _brk_end is set up to point to the first "safe" location.
* Mappings are created both at virtual address 0 (identity mapping)
* and PAGE_OFFSET for up to _end+sizeof(page tables)+INIT_MAP_BEYOND_END.
* and PAGE_OFFSET for up to _end.
*
* Note that the stack is not yet set up!
*/
@@ -190,8 +188,7 @@ default_entry:
xorl %ebx,%ebx /* %ebx is kept at zero */
movl $pa(pg0), %edi
movl %edi, pa(init_pg_tables_start)
movl $pa(__brk_base), %edi
movl $pa(swapper_pg_pmd), %edx
movl $PTE_IDENT_ATTR, %eax
10:
@@ -209,14 +206,14 @@ default_entry:
loop 11b
/*
* End condition: we must map up to and including INIT_MAP_BEYOND_END
* bytes beyond the end of our own page tables.
* End condition: we must map up to the end + MAPPING_BEYOND_END.
*/
leal (INIT_MAP_BEYOND_END+PTE_IDENT_ATTR)(%edi),%ebp
movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
cmpl %ebp,%eax
jb 10b
1:
movl %edi,pa(init_pg_tables_end)
addl $__PAGE_OFFSET, %edi
movl %edi, pa(_brk_end)
shrl $12, %eax
movl %eax, pa(max_pfn_mapped)
@@ -227,8 +224,7 @@ default_entry:
page_pde_offset = (__PAGE_OFFSET >> 20);
movl $pa(pg0), %edi
movl %edi, pa(init_pg_tables_start)
movl $pa(__brk_base), %edi
movl $pa(swapper_pg_dir), %edx
movl $PTE_IDENT_ATTR, %eax
10:
@@ -242,14 +238,13 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
addl $0x1000,%eax
loop 11b
/*
* End condition: we must map up to and including INIT_MAP_BEYOND_END
* bytes beyond the end of our own page tables; the +0x007 is
* the attribute bits
* End condition: we must map up to the end + MAPPING_BEYOND_END.
*/
leal (INIT_MAP_BEYOND_END+PTE_IDENT_ATTR)(%edi),%ebp
movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
cmpl %ebp,%eax
jb 10b
movl %edi,pa(init_pg_tables_end)
addl $__PAGE_OFFSET, %edi
movl %edi, pa(_brk_end)
shrl $12, %eax
movl %eax, pa(max_pfn_mapped)
@@ -636,6 +631,7 @@ swapper_pg_fixmap:
.fill 1024,4,0
ENTRY(empty_zero_page)
.fill 4096,1,0
/*
* This starts the data section.
*/


@@ -80,6 +80,7 @@ static inline void hpet_clear_mapping(void)
*/
static int boot_hpet_disable;
int hpet_force_user;
static int hpet_verbose;
static int __init hpet_setup(char *str)
{
@@ -88,6 +89,8 @@ static int __init hpet_setup(char *str)
boot_hpet_disable = 1;
if (!strncmp("force", str, 5))
hpet_force_user = 1;
if (!strncmp("verbose", str, 7))
hpet_verbose = 1;
}
return 1;
}
@@ -119,6 +122,43 @@ int is_hpet_enabled(void)
}
EXPORT_SYMBOL_GPL(is_hpet_enabled);
static void _hpet_print_config(const char *function, int line)
{
u32 i, timers, l, h;
printk(KERN_INFO "hpet: %s(%d):\n", function, line);
l = hpet_readl(HPET_ID);
h = hpet_readl(HPET_PERIOD);
timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
l = hpet_readl(HPET_CFG);
h = hpet_readl(HPET_STATUS);
printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
l = hpet_readl(HPET_COUNTER);
h = hpet_readl(HPET_COUNTER+4);
printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);
for (i = 0; i < timers; i++) {
l = hpet_readl(HPET_Tn_CFG(i));
h = hpet_readl(HPET_Tn_CFG(i)+4);
printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n",
i, l, h);
l = hpet_readl(HPET_Tn_CMP(i));
h = hpet_readl(HPET_Tn_CMP(i)+4);
printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n",
i, l, h);
l = hpet_readl(HPET_Tn_ROUTE(i));
h = hpet_readl(HPET_Tn_ROUTE(i)+4);
printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n",
i, l, h);
}
}
#define hpet_print_config() \
do { \
if (hpet_verbose) \
_hpet_print_config(__FUNCTION__, __LINE__); \
} while (0)
/*
* When the hpet driver (/dev/hpet) is enabled, we need to reserve
* timer 0 and timer 1 in case of RTC emulation.
@@ -191,27 +231,37 @@ static struct clock_event_device hpet_clockevent = {
.rating = 50,
};
static void hpet_start_counter(void)
static void hpet_stop_counter(void)
{
unsigned long cfg = hpet_readl(HPET_CFG);
cfg &= ~HPET_CFG_ENABLE;
hpet_writel(cfg, HPET_CFG);
hpet_writel(0, HPET_COUNTER);
hpet_writel(0, HPET_COUNTER + 4);
}
static void hpet_start_counter(void)
{
unsigned long cfg = hpet_readl(HPET_CFG);
cfg |= HPET_CFG_ENABLE;
hpet_writel(cfg, HPET_CFG);
}
static void hpet_restart_counter(void)
{
hpet_stop_counter();
hpet_start_counter();
}
static void hpet_resume_device(void)
{
force_hpet_resume();
}
static void hpet_restart_counter(void)
static void hpet_resume_counter(void)
{
hpet_resume_device();
hpet_start_counter();
hpet_restart_counter();
}
static void hpet_enable_legacy_int(void)
@@ -259,29 +309,23 @@ static int hpet_setup_msi_irq(unsigned int irq);
static void hpet_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt, int timer)
{
unsigned long cfg, cmp, now;
unsigned long cfg;
uint64_t delta;
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
hpet_stop_counter();
delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
delta >>= evt->shift;
now = hpet_readl(HPET_COUNTER);
cmp = now + (unsigned long) delta;
cfg = hpet_readl(HPET_Tn_CFG(timer));
/* Make sure we use edge triggered interrupts */
cfg &= ~HPET_TN_LEVEL;
cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
HPET_TN_SETVAL | HPET_TN_32BIT;
hpet_writel(cfg, HPET_Tn_CFG(timer));
/*
* The first write after writing TN_SETVAL to the
* config register sets the counter value, the second
* write sets the period.
*/
hpet_writel(cmp, HPET_Tn_CMP(timer));
udelay(1);
hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
hpet_start_counter();
hpet_print_config();
break;
case CLOCK_EVT_MODE_ONESHOT:
@@ -308,6 +352,7 @@ static void hpet_set_mode(enum clock_event_mode mode,
irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
enable_irq(hdev->irq);
}
hpet_print_config();
break;
}
}
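
Pulling the hunks above together, the reworked periodic path now looks roughly like this (a reconstruction, not a verbatim quote; delta is the period in HPET ticks, e.g. a 14.318 MHz HPET at HZ=250 gives a delta of about 57273):

	hpet_stop_counter();			/* halt the main counter, reset to 0 */
	delta = ((uint64_t)(NSEC_PER_SEC / HZ)) * evt->mult;
	delta >>= evt->shift;			/* ns per tick -> HPET ticks */
	cfg = hpet_readl(HPET_Tn_CFG(timer));
	cfg &= ~HPET_TN_LEVEL;			/* edge triggered */
	cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
	       HPET_TN_SETVAL | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_Tn_CFG(timer));
	hpet_writel((unsigned long)delta, HPET_Tn_CMP(timer));
	hpet_start_counter();

Because the counter is stopped and zeroed first, the comparator can be written with the raw period instead of the racy now + delta read-modify-write the old code used.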
@@ -526,6 +571,7 @@ static void hpet_msi_capability_lookup(unsigned int start_timer)
num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
num_timers++; /* Value read out starts from 0 */
hpet_print_config();
hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
if (!hpet_devs)
@@ -695,7 +741,7 @@ static struct clocksource clocksource_hpet = {
.mask = HPET_MASK,
.shift = HPET_SHIFT,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.resume = hpet_restart_counter,
.resume = hpet_resume_counter,
#ifdef CONFIG_X86_64
.vread = vread_hpet,
#endif
@@ -707,7 +753,7 @@ static int hpet_clocksource_register(void)
cycle_t t1;
/* Start the counter */
hpet_start_counter();
hpet_restart_counter();
/* Verify whether hpet counter works */
t1 = read_hpet();
@@ -793,6 +839,7 @@ int __init hpet_enable(void)
* information and the number of channels
*/
id = hpet_readl(HPET_ID);
hpet_print_config();
#ifdef CONFIG_HPET_EMULATE_RTC
/*
@@ -845,6 +892,7 @@ static __init int hpet_late_init(void)
return -ENODEV;
hpet_reserve_platform_timers(hpet_readl(HPET_ID));
hpet_print_config();
for_each_online_cpu(cpu) {
hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);


@@ -3,17 +3,17 @@
*
*/
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/smp.h>
#include <asm/delay.h>
#include <asm/i8253.h>
#include <asm/io.h>
#include <asm/hpet.h>
#include <asm/smp.h>
DEFINE_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
@@ -40,7 +40,7 @@ static void init_pit_timer(enum clock_event_mode mode,
{
spin_lock(&i8253_lock);
switch(mode) {
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
/* binary, mode 2, LSB/MSB, ch 0 */
outb_pit(0x34, PIT_MODE);
@@ -95,7 +95,7 @@ static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
* registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
* !using_apic_timer decisions in do_timer_interrupt_hook()
*/
static struct clock_event_device pit_clockevent = {
static struct clock_event_device pit_ce = {
.name = "pit",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_mode = init_pit_timer,
@@ -114,15 +114,13 @@ void __init setup_pit_timer(void)
* Start pit with the boot cpu mask and make it global after the
* IO_APIC has been initialized.
*/
pit_clockevent.cpumask = cpumask_of(smp_processor_id());
pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC,
pit_clockevent.shift);
pit_clockevent.max_delta_ns =
clockevent_delta2ns(0x7FFF, &pit_clockevent);
pit_clockevent.min_delta_ns =
clockevent_delta2ns(0xF, &pit_clockevent);
clockevents_register_device(&pit_clockevent);
global_clock_event = &pit_clockevent;
pit_ce.cpumask = cpumask_of(smp_processor_id());
pit_ce.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, pit_ce.shift);
pit_ce.max_delta_ns = clockevent_delta2ns(0x7FFF, &pit_ce);
pit_ce.min_delta_ns = clockevent_delta2ns(0xF, &pit_ce);
clockevents_register_device(&pit_ce);
global_clock_event = &pit_ce;
}
#ifndef CONFIG_X86_64
@@ -133,11 +131,11 @@ void __init setup_pit_timer(void)
*/
static cycle_t pit_read(void)
{
static int old_count;
static u32 old_jifs;
unsigned long flags;
int count;
u32 jifs;
static int old_count;
static u32 old_jifs;
spin_lock_irqsave(&i8253_lock, flags);
/*
@@ -179,9 +177,9 @@ static cycle_t pit_read(void)
* Previous attempts to handle these cases intelligently were
* buggy, so we just do the simple thing now.
*/
if (count > old_count && jifs == old_jifs) {
if (count > old_count && jifs == old_jifs)
count = old_count;
}
old_count = count;
old_jifs = jifs;
@@ -192,13 +190,13 @@ static cycle_t pit_read(void)
return (cycle_t)(jifs * LATCH) + count;
}
static struct clocksource clocksource_pit = {
.name = "pit",
.rating = 110,
.read = pit_read,
.mask = CLOCKSOURCE_MASK(32),
.mult = 0,
.shift = 20,
static struct clocksource pit_cs = {
.name = "pit",
.rating = 110,
.read = pit_read,
.mask = CLOCKSOURCE_MASK(32),
.mult = 0,
.shift = 20,
};
static void pit_disable_clocksource(void)
@@ -206,9 +204,9 @@ static void pit_disable_clocksource(void)
/*
* Use mult to check whether it is registered or not
*/
if (clocksource_pit.mult) {
clocksource_unregister(&clocksource_pit);
clocksource_pit.mult = 0;
if (pit_cs.mult) {
clocksource_unregister(&pit_cs);
pit_cs.mult = 0;
}
}
@@ -222,13 +220,13 @@ static int __init init_pit_clocksource(void)
* - when local APIC timer is active (PIT is switched off)
*/
if (num_possible_cpus() > 1 || is_hpet_enabled() ||
pit_clockevent.mode != CLOCK_EVT_MODE_PERIODIC)
pit_ce.mode != CLOCK_EVT_MODE_PERIODIC)
return 0;
clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE,
clocksource_pit.shift);
return clocksource_register(&clocksource_pit);
pit_cs.mult = clocksource_hz2mult(CLOCK_TICK_RATE, pit_cs.shift);
return clocksource_register(&pit_cs);
}
arch_initcall(init_pit_clocksource);
#endif
#endif /* !CONFIG_X86_64 */
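
As a sanity check on the mult/shift pair (shift = 20 above; the HZ value and the standalone form are assumed), the clocksource conversion ns = (cycles * mult) >> shift can be exercised in userspace:

/* Sketch: PIT cycles -> nanoseconds with the shift used above. */
#include <stdint.h>
#include <stdio.h>

#define CLOCK_TICK_RATE	1193182u	/* PIT input clock, Hz */
#define NSEC_PER_SEC	1000000000ull

int main(void)
{
	uint32_t shift = 20;
	uint64_t mult = (NSEC_PER_SEC << shift) / CLOCK_TICK_RATE; /* hz2mult */
	uint64_t cycles = CLOCK_TICK_RATE / 250;	/* LATCH at HZ=250: 4772 */

	/* one jiffy's worth of PIT cycles should print as roughly 4 ms */
	printf("%llu ns\n", (unsigned long long)((cycles * mult) >> shift));
	return 0;
}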


@@ -7,10 +7,10 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <asm/io.h>
#include <linux/io.h>
int io_delay_type __read_mostly = CONFIG_DEFAULT_IO_DELAY_TYPE;
@@ -47,8 +47,7 @@ EXPORT_SYMBOL(native_io_delay);
static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
{
if (io_delay_type == CONFIG_IO_DELAY_TYPE_0X80) {
printk(KERN_NOTICE "%s: using 0xed I/O delay port\n",
id->ident);
pr_notice("%s: using 0xed I/O delay port\n", id->ident);
io_delay_type = CONFIG_IO_DELAY_TYPE_0XED;
}
@@ -64,40 +63,40 @@ static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
.callback = dmi_io_delay_0xed_port,
.ident = "Compaq Presario V6000",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
DMI_MATCH(DMI_BOARD_NAME, "30B7")
DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
DMI_MATCH(DMI_BOARD_NAME, "30B7")
}
},
{
.callback = dmi_io_delay_0xed_port,
.ident = "HP Pavilion dv9000z",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
DMI_MATCH(DMI_BOARD_NAME, "30B9")
DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
DMI_MATCH(DMI_BOARD_NAME, "30B9")
}
},
{
.callback = dmi_io_delay_0xed_port,
.ident = "HP Pavilion dv6000",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
DMI_MATCH(DMI_BOARD_NAME, "30B8")
DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
DMI_MATCH(DMI_BOARD_NAME, "30B8")
}
},
{
.callback = dmi_io_delay_0xed_port,
.ident = "HP Pavilion tx1000",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
DMI_MATCH(DMI_BOARD_NAME, "30BF")
DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
DMI_MATCH(DMI_BOARD_NAME, "30BF")
}
},
{
.callback = dmi_io_delay_0xed_port,
.ident = "Presario F700",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
DMI_MATCH(DMI_BOARD_NAME, "30D3")
DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
DMI_MATCH(DMI_BOARD_NAME, "30D3")
}
},
{ }


@@ -45,19 +45,24 @@ void ack_bad_irq(unsigned int irq)
/*
* /proc/interrupts printing:
*/
static int show_other_interrupts(struct seq_file *p)
static int show_other_interrupts(struct seq_file *p, int prec)
{
int j;
seq_printf(p, "NMI: ");
seq_printf(p, "%*s: ", prec, "NMI");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
seq_printf(p, " Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
seq_printf(p, "LOC: ");
seq_printf(p, "%*s: ", prec, "LOC");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
seq_printf(p, " Local timer interrupts\n");
seq_printf(p, "%*s: ", prec, "SPU");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
seq_printf(p, " Spurious interrupts\n");
#endif
if (generic_interrupt_extension) {
seq_printf(p, "PLT: ");
@@ -66,40 +71,34 @@ static int show_other_interrupts(struct seq_file *p)
seq_printf(p, " Platform interrupts\n");
}
#ifdef CONFIG_SMP
seq_printf(p, "RES: ");
seq_printf(p, "%*s: ", prec, "RES");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
seq_printf(p, " Rescheduling interrupts\n");
seq_printf(p, "CAL: ");
seq_printf(p, "%*s: ", prec, "CAL");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
seq_printf(p, " Function call interrupts\n");
seq_printf(p, "TLB: ");
seq_printf(p, "%*s: ", prec, "TLB");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
seq_printf(p, " TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_MCE
seq_printf(p, "TRM: ");
seq_printf(p, "%*s: ", prec, "TRM");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
seq_printf(p, " Thermal event interrupts\n");
# ifdef CONFIG_X86_64
seq_printf(p, "THR: ");
seq_printf(p, "%*s: ", prec, "THR");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
seq_printf(p, " Threshold APIC interrupts\n");
# endif
#endif
#ifdef CONFIG_X86_LOCAL_APIC
seq_printf(p, "SPU: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
seq_printf(p, " Spurious interrupts\n");
#endif
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
return 0;
}
@@ -107,19 +106,22 @@ static int show_other_interrupts(struct seq_file *p)
int show_interrupts(struct seq_file *p, void *v)
{
unsigned long flags, any_count = 0;
int i = *(loff_t *) v, j;
int i = *(loff_t *) v, j, prec;
struct irqaction *action;
struct irq_desc *desc;
if (i > nr_irqs)
return 0;
for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
j *= 10;
if (i == nr_irqs)
return show_other_interrupts(p);
return show_other_interrupts(p, prec);
/* print header */
if (i == 0) {
seq_printf(p, " ");
seq_printf(p, "%*s", prec + 8, "");
for_each_online_cpu(j)
seq_printf(p, "CPU%-8d", j);
seq_putc(p, '\n');
@@ -130,23 +132,15 @@ int show_interrupts(struct seq_file *p, void *v)
return 0;
spin_lock_irqsave(&desc->lock, flags);
#ifndef CONFIG_SMP
any_count = kstat_irqs(i);
#else
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
#endif
action = desc->action;
if (!action && !any_count)
goto out;
seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
seq_printf(p, "%*d: ", prec, i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
seq_printf(p, " %8s", desc->chip->name);
seq_printf(p, "-%-8s", desc->name);
@@ -171,6 +165,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
#ifdef CONFIG_X86_LOCAL_APIC
sum += irq_stats(cpu)->apic_timer_irqs;
sum += irq_stats(cpu)->irq_spurious_count;
#endif
if (generic_interrupt_extension)
sum += irq_stats(cpu)->generic_irqs;
@@ -184,9 +179,6 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
# ifdef CONFIG_X86_64
sum += irq_stats(cpu)->irq_threshold_count;
#endif
#endif
#ifdef CONFIG_X86_LOCAL_APIC
sum += irq_stats(cpu)->irq_spurious_count;
#endif
return sum;
}
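
The field-width loop above picks prec so that the largest interrupt number fits in the label column; a standalone check with an assumed nr_irqs:

/* Sketch: prec selection as in show_interrupts() above. */
#include <stdio.h>

int main(void)
{
	int nr_irqs = 4352;		/* hypothetical value */
	int prec, j;

	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
		j *= 10;
	printf("prec = %d\n", prec);	/* 4: wide enough for "4352" */
	return 0;
}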


@@ -50,7 +50,6 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
*/
static struct irqaction fpu_irq = {
.handler = math_error_irq,
.mask = CPU_MASK_NONE,
.name = "fpu",
};
@@ -83,7 +82,6 @@ void __init init_ISA_irqs(void)
*/
static struct irqaction irq2 = {
.handler = no_action,
.mask = CPU_MASK_NONE,
.name = "cascade",
};


@@ -45,7 +45,6 @@
static struct irqaction irq2 = {
.handler = no_action,
.mask = CPU_MASK_NONE,
.name = "cascade",
};
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {


@@ -8,11 +8,11 @@
*/
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/setup.h>
@@ -26,9 +26,8 @@ struct setup_data_node {
u32 len;
};
static ssize_t
setup_data_read(struct file *file, char __user *user_buf, size_t count,
loff_t *ppos)
static ssize_t setup_data_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct setup_data_node *node = file->private_data;
unsigned long remain;
@@ -39,20 +38,21 @@ setup_data_read(struct file *file, char __user *user_buf, size_t count,
if (pos < 0)
return -EINVAL;
if (pos >= node->len)
return 0;
if (count > node->len - pos)
count = node->len - pos;
pa = node->paddr + sizeof(struct setup_data) + pos;
pg = pfn_to_page((pa + count - 1) >> PAGE_SHIFT);
if (PageHighMem(pg)) {
p = ioremap_cache(pa, count);
if (!p)
return -ENXIO;
} else {
} else
p = __va(pa);
}
remain = copy_to_user(user_buf, p, count);
@@ -70,12 +70,13 @@ setup_data_read(struct file *file, char __user *user_buf, size_t count,
static int setup_data_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return 0;
}
static const struct file_operations fops_setup_data = {
.read = setup_data_read,
.open = setup_data_open,
.read = setup_data_read,
.open = setup_data_open,
};
static int __init
@@ -84,57 +85,50 @@ create_setup_data_node(struct dentry *parent, int no,
{
struct dentry *d, *type, *data;
char buf[16];
int error;
sprintf(buf, "%d", no);
d = debugfs_create_dir(buf, parent);
if (!d) {
error = -ENOMEM;
goto err_return;
}
if (!d)
return -ENOMEM;
type = debugfs_create_x32("type", S_IRUGO, d, &node->type);
if (!type) {
error = -ENOMEM;
if (!type)
goto err_dir;
}
data = debugfs_create_file("data", S_IRUGO, d, node, &fops_setup_data);
if (!data) {
error = -ENOMEM;
if (!data)
goto err_type;
}
return 0;
err_type:
debugfs_remove(type);
err_dir:
debugfs_remove(d);
err_return:
return error;
return -ENOMEM;
}
static int __init create_setup_data_nodes(struct dentry *parent)
{
struct setup_data_node *node;
struct setup_data *data;
int error, no = 0;
int error = -ENOMEM;
struct dentry *d;
struct page *pg;
u64 pa_data;
int no = 0;
d = debugfs_create_dir("setup_data", parent);
if (!d) {
error = -ENOMEM;
goto err_return;
}
if (!d)
return -ENOMEM;
pa_data = boot_params.hdr.setup_data;
while (pa_data) {
node = kmalloc(sizeof(*node), GFP_KERNEL);
if (!node) {
error = -ENOMEM;
if (!node)
goto err_dir;
}
pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT);
if (PageHighMem(pg)) {
data = ioremap_cache(pa_data, sizeof(*data));
@@ -143,9 +137,8 @@ static int __init create_setup_data_nodes(struct dentry *parent)
error = -ENXIO;
goto err_dir;
}
} else {
} else
data = __va(pa_data);
}
node->paddr = pa_data;
node->type = data->type;
@@ -159,11 +152,11 @@ static int __init create_setup_data_nodes(struct dentry *parent)
goto err_dir;
no++;
}
return 0;
err_dir:
debugfs_remove(d);
err_return:
return error;
}
@@ -175,28 +168,26 @@ static struct debugfs_blob_wrapper boot_params_blob = {
static int __init boot_params_kdebugfs_init(void)
{
struct dentry *dbp, *version, *data;
int error;
int error = -ENOMEM;
dbp = debugfs_create_dir("boot_params", NULL);
if (!dbp) {
error = -ENOMEM;
goto err_return;
}
if (!dbp)
return -ENOMEM;
version = debugfs_create_x16("version", S_IRUGO, dbp,
&boot_params.hdr.version);
if (!version) {
error = -ENOMEM;
if (!version)
goto err_dir;
}
data = debugfs_create_blob("data", S_IRUGO, dbp,
&boot_params_blob);
if (!data) {
error = -ENOMEM;
if (!data)
goto err_version;
}
error = create_setup_data_nodes(dbp);
if (error)
goto err_data;
return 0;
err_data:
@@ -205,10 +196,9 @@ err_version:
debugfs_remove(version);
err_dir:
debugfs_remove(dbp);
err_return:
return error;
}
#endif
#endif /* CONFIG_DEBUG_BOOT_PARAMS */
static int __init arch_kdebugfs_init(void)
{


@@ -193,6 +193,9 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
kprobe_opcode_t opcode;
kprobe_opcode_t *orig_opcodes = opcodes;
if (search_exception_tables((unsigned long)opcodes))
return 0; /* Page fault may occur on this address. */
retry:
if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
return 0;
@@ -635,13 +638,13 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
#else
" pushf\n"
/*
* Skip cs, ip, orig_ax.
* Skip cs, ip, orig_ax and gs.
* trampoline_handler() will plug in these values
*/
" subl $12, %esp\n"
" subl $16, %esp\n"
" pushl %fs\n"
" pushl %ds\n"
" pushl %es\n"
" pushl %ds\n"
" pushl %eax\n"
" pushl %ebp\n"
" pushl %edi\n"
@@ -652,10 +655,10 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
" movl %esp, %eax\n"
" call trampoline_handler\n"
/* Move flags to cs */
" movl 52(%esp), %edx\n"
" movl %edx, 48(%esp)\n"
" movl 56(%esp), %edx\n"
" movl %edx, 52(%esp)\n"
/* Replace saved flags with true return address. */
" movl %eax, 52(%esp)\n"
" movl %eax, 56(%esp)\n"
" popl %ebx\n"
" popl %ecx\n"
" popl %edx\n"
@@ -663,8 +666,8 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
" popl %edi\n"
" popl %ebp\n"
" popl %eax\n"
/* Skip ip, orig_ax, es, ds, fs */
" addl $20, %esp\n"
/* Skip ds, es, fs, gs, orig_ax and ip */
" addl $24, %esp\n"
" popf\n"
#endif
" ret\n");
@@ -688,6 +691,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
regs->cs = __KERNEL_CS;
#else
regs->cs = __KERNEL_CS | get_kernel_rpl();
regs->gs = 0;
#endif
regs->ip = trampoline_address;
regs->orig_ax = ~0UL;
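
The new constants in the asm above follow from the frame the trampoline builds; counting upward from %esp after all the pushes (a reconstruction):

	10 pushed registers (ebx..eax, ds, es, fs)  at  0..36(%esp)  = 40 bytes
	4 skipped slots     (gs, orig_ax, ip, cs)   at 40..52(%esp)  = 16 bytes
	saved flags (from pushf)                    at    56(%esp)

Hence the flags shuffle moves from 52/48 to 56/52(%esp), and the final addl grows from $20 to $24 because six slots (ds, es, fs, gs, orig_ax, ip) now sit between the popped registers and popf.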


@@ -138,12 +138,6 @@ static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
kvm_mmu_write(ptep, pte_val(pte));
}
static void kvm_set_pte_present(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
kvm_mmu_write(ptep, pte_val(pte));
}
static void kvm_pte_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
@@ -220,7 +214,6 @@ static void paravirt_ops_setup(void)
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
pv_mmu_ops.set_pte_present = kvm_set_pte_present;
pv_mmu_ops.pte_clear = kvm_pte_clear;
pv_mmu_ops.pmd_clear = kvm_pmd_clear;
#endif


@@ -14,12 +14,12 @@
#include <linux/ftrace.h>
#include <linux/suspend.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
@@ -63,7 +63,7 @@ static void load_segments(void)
"\tmovl %%eax,%%fs\n"
"\tmovl %%eax,%%gs\n"
"\tmovl %%eax,%%ss\n"
::: "eax", "memory");
: : : "eax", "memory");
#undef STR
#undef __STR
}
@@ -205,7 +205,8 @@ void machine_kexec(struct kimage *image)
if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
/* We need to put APICs in legacy mode so that we can
/*
* We need to put APICs in legacy mode so that we can
* get timer interrupts in the second kernel. kexec/kdump
* paths already have calls to disable_IO_APIC() in
* one form or another. The kexec jump path also needs
@@ -227,7 +228,8 @@ void machine_kexec(struct kimage *image)
page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
<< PAGE_SHIFT);
/* The segment registers are funny things, they have both a
/*
* The segment registers are funny things, they have both a
* visible and an invisible part. Whenever the visible part is
* set to a specific selector, the invisible part is loaded
* from a table in memory. At no other time is the
@@ -237,11 +239,12 @@ void machine_kexec(struct kimage *image)
* segments, before I zap the gdt with an invalid value.
*/
load_segments();
/* The gdt & idt are now invalid.
/*
* The gdt & idt are now invalid.
* If you want to load them you must set up your own idt & gdt.
*/
set_gdt(phys_to_virt(0),0);
set_idt(phys_to_virt(0),0);
set_gdt(phys_to_virt(0), 0);
set_idt(phys_to_virt(0), 0);
/* now call it */
image->start = relocate_kernel_ptr((unsigned long)image->head,


@@ -12,11 +12,47 @@
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
unsigned long addr)
{
pud_t *pud;
pmd_t *pmd;
struct page *page;
int result = -ENOMEM;
addr &= PMD_MASK;
pgd += pgd_index(addr);
if (!pgd_present(*pgd)) {
page = kimage_alloc_control_pages(image, 0);
if (!page)
goto out;
pud = (pud_t *)page_address(page);
memset(pud, 0, PAGE_SIZE);
set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
}
pud = pud_offset(pgd, addr);
if (!pud_present(*pud)) {
page = kimage_alloc_control_pages(image, 0);
if (!page)
goto out;
pmd = (pmd_t *)page_address(page);
memset(pmd, 0, PAGE_SIZE);
set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
}
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd))
set_pmd(pmd, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
result = 0;
out:
return result;
}
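
For intuition, the table slots the new walker touches can be computed in isolation (a userspace sketch; the address and the hard-coded x86_64 shifts are assumptions):

/* Sketch: which pgd/pud/pmd slots cover a 2 MiB-aligned address. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x123456789000ULL;	/* hypothetical image->start */

	addr &= ~((1ULL << 21) - 1);		/* PMD_MASK: 2 MiB alignment */
	printf("pgd %llu, pud %llu, pmd %llu\n",
	       (unsigned long long)((addr >> 39) & 0x1ff),
	       (unsigned long long)((addr >> 30) & 0x1ff),
	       (unsigned long long)((addr >> 21) & 0x1ff));
	return 0;
}

Each level that is missing gets one control page, zeroed and linked in, and the final PMD entry maps the region with a 2 MiB executable large page.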
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
@@ -83,9 +119,8 @@ static int init_level4_page(struct kimage *image, pgd_t *level4p,
}
level3p = (pud_t *)page_address(page);
result = init_level3_page(image, level3p, addr, last_addr);
if (result) {
if (result)
goto out;
}
set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
addr += PGDIR_SIZE;
}
@@ -154,6 +189,13 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
int result;
level4p = (pgd_t *)__va(start_pgtable);
result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
if (result)
return result;
/*
* image->start may be outside 0 ~ max_pfn, for example when
* jump back to original kernel from kexeced kernel
*/
result = init_one_level2_page(image, level4p, image->start);
if (result)
return result;
return init_transition_pgtable(image, level4p);
@@ -229,20 +271,45 @@ void machine_kexec(struct kimage *image)
{
unsigned long page_list[PAGES_NR];
void *control_page;
int save_ftrace_enabled;
tracer_disable();
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context)
save_processor_state();
#endif
save_ftrace_enabled = __ftrace_enabled_save();
/* Interrupts aren't acceptable while we reboot */
local_irq_disable();
if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
/*
* We need to put APICs in legacy mode so that we can
* get timer interrupts in the second kernel. kexec/kdump
* paths already have calls to disable_IO_APIC() in
* one form or another. The kexec jump path also needs
* one.
*/
disable_IO_APIC();
#endif
}
control_page = page_address(image->control_code_page) + PAGE_SIZE;
memcpy(control_page, relocate_kernel, PAGE_SIZE);
memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
page_list[PA_TABLE_PAGE] =
(unsigned long)__pa(page_address(image->control_code_page));
/* The segment registers are funny things, they have both a
if (image->type == KEXEC_TYPE_DEFAULT)
page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
<< PAGE_SHIFT);
/*
* The segment registers are funny things, they have both a
* visible and an invisible part. Whenever the visible part is
* set to a specific selector, the invisible part is loaded
* from a table in memory. At no other time is the
@@ -252,15 +319,25 @@ void machine_kexec(struct kimage *image)
* segments, before I zap the gdt with an invalid value.
*/
load_segments();
/* The gdt & idt are now invalid.
/*
* The gdt & idt are now invalid.
* If you want to load them you must set up your own idt & gdt.
*/
set_gdt(phys_to_virt(0),0);
set_idt(phys_to_virt(0),0);
set_gdt(phys_to_virt(0), 0);
set_idt(phys_to_virt(0), 0);
/* now call it */
relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
image->start);
image->start = relocate_kernel((unsigned long)image->head,
(unsigned long)page_list,
image->start,
image->preserve_context);
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context)
restore_processor_state();
#endif
__ftrace_enabled_restore(save_ftrace_enabled);
}
void arch_crash_save_vmcoreinfo(void)


@@ -348,7 +348,6 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
static struct irqaction mfgptirq = {
.handler = mfgpt_tick,
.flags = IRQF_DISABLED | IRQF_NOBALANCING,
.mask = CPU_MASK_NONE,
.name = "mfgpt-timer"
};


@@ -12,31 +12,30 @@
*
* Licensed under the terms of the GNU General Public
* License version 2. See file COPYING for details.
*/
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/firmware.h>
*/
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/firmware.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/pci_ids.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/msr.h>
MODULE_DESCRIPTION("AMD Microcode Update Driver");
MODULE_AUTHOR("Peter Oruba");
@@ -72,8 +71,8 @@ struct microcode_header_amd {
} __attribute__((packed));
struct microcode_amd {
struct microcode_header_amd hdr;
unsigned int mpb[0];
struct microcode_header_amd hdr;
unsigned int mpb[0];
};
#define UCODE_MAX_SIZE 2048
@@ -184,8 +183,8 @@ static int get_ucode_data(void *to, const u8 *from, size_t n)
return 0;
}
static void *get_next_ucode(const u8 *buf, unsigned int size,
unsigned int *mc_size)
static void *
get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size)
{
unsigned int total_size;
u8 section_hdr[UCODE_CONTAINER_SECTION_HDR];
@@ -223,7 +222,6 @@ static void *get_next_ucode(const u8 *buf, unsigned int size,
return mc;
}
static int install_equiv_cpu_table(const u8 *buf)
{
u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE];
@@ -372,4 +370,3 @@ struct microcode_ops * __init init_amd_microcode(void)
{
return &microcode_amd_ops;
}


@@ -70,67 +70,78 @@
* Fix sigmatch() macro to handle old CPUs with pf == 0.
* Thanks to Stuart Swales for pointing out this bug.
*/
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/firmware.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/msr.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/msr.h>
MODULE_DESCRIPTION("Microcode Update Driver");
MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
MODULE_LICENSE("GPL");
#define MICROCODE_VERSION "2.00"
#define MICROCODE_VERSION "2.00"
static struct microcode_ops *microcode_ops;
static struct microcode_ops *microcode_ops;
/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
static DEFINE_MUTEX(microcode_mutex);
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
EXPORT_SYMBOL_GPL(ucode_cpu_info);
#ifdef CONFIG_MICROCODE_OLD_INTERFACE
struct update_for_cpu {
const void __user *buf;
size_t size;
};
static long update_for_cpu(void *_ufc)
{
struct update_for_cpu *ufc = _ufc;
int error;
error = microcode_ops->request_microcode_user(smp_processor_id(),
ufc->buf, ufc->size);
if (error < 0)
return error;
if (!error)
microcode_ops->apply_microcode(smp_processor_id());
return error;
}
static int do_microcode_update(const void __user *buf, size_t size)
{
cpumask_t old;
int error = 0;
int cpu;
old = current->cpus_allowed;
struct update_for_cpu ufc = { .buf = buf, .size = size };
for_each_online_cpu(cpu) {
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
if (!uci->valid)
continue;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
error = microcode_ops->request_microcode_user(cpu, buf, size);
error = work_on_cpu(cpu, update_for_cpu, &ufc);
if (error < 0)
goto out;
if (!error)
microcode_ops->apply_microcode(cpu);
break;
}
out:
set_cpus_allowed_ptr(current, &old);
return error;
}
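
The conversion bundles the arguments in a struct and lets work_on_cpu() run the helper with the workqueue thread bound to the target CPU, instead of rewriting current->cpus_allowed. A minimal kernel-style sketch of the pattern (struct and function names are ours, not the driver's):

/* Sketch: the work_on_cpu() pattern used above. */
#include <linux/workqueue.h>
#include <linux/smp.h>

struct add_args {
	int a, b;
};

static long add_on_cpu(void *_args)	/* runs on the CPU picked below */
{
	struct add_args *args = _args;

	pr_info("adding on CPU%d\n", smp_processor_id());
	return args->a + args->b;
}

static long demo(unsigned int cpu)
{
	struct add_args args = { .a = 1, .b = 2 };

	return work_on_cpu(cpu, add_on_cpu, &args);	/* 3, or -errno */
}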
@@ -198,18 +209,33 @@ static void microcode_dev_exit(void)
MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
#else
#define microcode_dev_init() 0
#define microcode_dev_exit() do { } while (0)
#define microcode_dev_init() 0
#define microcode_dev_exit() do { } while (0)
#endif
/* fake device for request_firmware */
static struct platform_device *microcode_pdev;
static struct platform_device *microcode_pdev;
static long reload_for_cpu(void *unused)
{
struct ucode_cpu_info *uci = ucode_cpu_info + smp_processor_id();
int err = 0;
mutex_lock(&microcode_mutex);
if (uci->valid) {
err = microcode_ops->request_microcode_fw(smp_processor_id(),
&microcode_pdev->dev);
if (!err)
microcode_ops->apply_microcode(smp_processor_id());
}
mutex_unlock(&microcode_mutex);
return err;
}
static ssize_t reload_store(struct sys_device *dev,
struct sysdev_attribute *attr,
const char *buf, size_t sz)
{
struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
char *end;
unsigned long val = simple_strtoul(buf, &end, 0);
int err = 0;
@@ -218,21 +244,9 @@ static ssize_t reload_store(struct sys_device *dev,
if (end == buf)
return -EINVAL;
if (val == 1) {
cpumask_t old = current->cpus_allowed;
get_online_cpus();
if (cpu_online(cpu)) {
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
mutex_lock(&microcode_mutex);
if (uci->valid) {
err = microcode_ops->request_microcode_fw(cpu,
&microcode_pdev->dev);
if (!err)
microcode_ops->apply_microcode(cpu);
}
mutex_unlock(&microcode_mutex);
set_cpus_allowed_ptr(current, &old);
}
if (cpu_online(cpu))
err = work_on_cpu(cpu, reload_for_cpu, NULL);
put_online_cpus();
}
if (err)
@@ -268,8 +282,8 @@ static struct attribute *mc_default_attrs[] = {
};
static struct attribute_group mc_attr_group = {
.attrs = mc_default_attrs,
.name = "microcode",
.attrs = mc_default_attrs,
.name = "microcode",
};
static void __microcode_fini_cpu(int cpu)
@@ -328,9 +342,9 @@ static int microcode_resume_cpu(int cpu)
return 0;
}
static void microcode_update_cpu(int cpu)
static long microcode_update_cpu(void *unused)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct ucode_cpu_info *uci = ucode_cpu_info + smp_processor_id();
int err = 0;
/*
@@ -338,30 +352,27 @@ static void microcode_update_cpu(int cpu)
* otherwise just request a firmware:
*/
if (uci->valid) {
err = microcode_resume_cpu(cpu);
} else {
collect_cpu_info(cpu);
err = microcode_resume_cpu(smp_processor_id());
} else {
collect_cpu_info(smp_processor_id());
if (uci->valid && system_state == SYSTEM_RUNNING)
err = microcode_ops->request_microcode_fw(cpu,
err = microcode_ops->request_microcode_fw(
smp_processor_id(),
&microcode_pdev->dev);
}
if (!err)
microcode_ops->apply_microcode(cpu);
microcode_ops->apply_microcode(smp_processor_id());
return err;
}
static void microcode_init_cpu(int cpu)
static int microcode_init_cpu(int cpu)
{
cpumask_t old = current->cpus_allowed;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
/* We should bind the task to the CPU */
BUG_ON(raw_smp_processor_id() != cpu);
int err;
mutex_lock(&microcode_mutex);
microcode_update_cpu(cpu);
err = work_on_cpu(cpu, microcode_update_cpu, NULL);
mutex_unlock(&microcode_mutex);
set_cpus_allowed_ptr(current, &old);
return err;
}
static int mc_sysdev_add(struct sys_device *sys_dev)
@@ -379,8 +390,11 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
if (err)
return err;
microcode_init_cpu(cpu);
return 0;
err = microcode_init_cpu(cpu);
if (err)
sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
return err;
}
static int mc_sysdev_remove(struct sys_device *sys_dev)
@@ -404,14 +418,14 @@ static int mc_sysdev_resume(struct sys_device *dev)
return 0;
/* only CPU 0 will apply ucode here */
microcode_update_cpu(0);
microcode_update_cpu(NULL);
return 0;
}
static struct sysdev_driver mc_sysdev_driver = {
.add = mc_sysdev_add,
.remove = mc_sysdev_remove,
.resume = mc_sysdev_resume,
.add = mc_sysdev_add,
.remove = mc_sysdev_remove,
.resume = mc_sysdev_resume,
};
static __cpuinit int
@@ -424,7 +438,9 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
microcode_init_cpu(cpu);
if (microcode_init_cpu(cpu))
printk(KERN_ERR "microcode: failed to init CPU%d\n",
cpu);
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
pr_debug("microcode: CPU%d added\n", cpu);
@@ -448,7 +464,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
}
static struct notifier_block __refdata mc_cpu_notifier = {
.notifier_call = mc_cpu_callback,
.notifier_call = mc_cpu_callback,
};
static int __init microcode_init(void)


@@ -70,28 +70,28 @@
* Fix sigmatch() macro to handle old CPUs with pf == 0.
* Thanks to Stuart Swales for pointing out this bug.
*/
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/firmware.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/msr.h>
MODULE_DESCRIPTION("Microcode Update Driver");
MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
@@ -129,12 +129,13 @@ struct extended_sigtable {
struct extended_signature sigs[0];
};
#define DEFAULT_UCODE_DATASIZE (2000)
#define DEFAULT_UCODE_DATASIZE (2000)
#define MC_HEADER_SIZE (sizeof(struct microcode_header_intel))
#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
#define EXT_HEADER_SIZE (sizeof(struct extended_sigtable))
#define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature))
#define DWSIZE (sizeof(u32))
#define get_totalsize(mc) \
(((struct microcode_intel *)mc)->hdr.totalsize ? \
((struct microcode_intel *)mc)->hdr.totalsize : \
@@ -197,30 +198,31 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf)
}
static inline int
update_match_revision(struct microcode_header_intel *mc_header, int rev)
update_match_revision(struct microcode_header_intel *mc_header, int rev)
{
return (mc_header->rev <= rev) ? 0 : 1;
}
static int microcode_sanity_check(void *mc)
{
unsigned long total_size, data_size, ext_table_size;
struct microcode_header_intel *mc_header = mc;
struct extended_sigtable *ext_header = NULL;
struct extended_signature *ext_sig;
unsigned long total_size, data_size, ext_table_size;
int sum, orig_sum, ext_sigcount = 0, i;
struct extended_signature *ext_sig;
total_size = get_totalsize(mc_header);
data_size = get_datasize(mc_header);
if (data_size + MC_HEADER_SIZE > total_size) {
printk(KERN_ERR "microcode: error! "
"Bad data size in microcode data file\n");
"Bad data size in microcode data file\n");
return -EINVAL;
}
if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
printk(KERN_ERR "microcode: error! "
"Unknown microcode update format\n");
"Unknown microcode update format\n");
return -EINVAL;
}
ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
@@ -318,11 +320,15 @@ get_matching_microcode(struct cpu_signature *cpu_sig, void *mc, int rev)
static void apply_microcode(int cpu)
{
struct microcode_intel *mc_intel;
struct ucode_cpu_info *uci;
unsigned long flags;
unsigned int val[2];
int cpu_num = raw_smp_processor_id();
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct microcode_intel *mc_intel = uci->mc;
int cpu_num;
cpu_num = raw_smp_processor_id();
uci = ucode_cpu_info + cpu;
mc_intel = uci->mc;
/* We should bind the task to the CPU */
BUG_ON(cpu_num != cpu);
@@ -348,15 +354,17 @@ static void apply_microcode(int cpu)
spin_unlock_irqrestore(&microcode_update_lock, flags);
if (val[1] != mc_intel->hdr.rev) {
printk(KERN_ERR "microcode: CPU%d update from revision "
"0x%x to 0x%x failed\n", cpu_num, uci->cpu_sig.rev, val[1]);
"0x%x to 0x%x failed\n",
cpu_num, uci->cpu_sig.rev, val[1]);
return;
}
printk(KERN_INFO "microcode: CPU%d updated from revision "
"0x%x to 0x%x, date = %04x-%02x-%02x \n",
"0x%x to 0x%x, date = %04x-%02x-%02x \n",
cpu_num, uci->cpu_sig.rev, val[1],
mc_intel->hdr.date & 0xffff,
mc_intel->hdr.date >> 24,
(mc_intel->hdr.date >> 16) & 0xff);
uci->cpu_sig.rev = val[1];
}
@@ -404,18 +412,23 @@ static int generic_load_microcode(int cpu, void *data, size_t size,
leftover -= mc_size;
}
if (new_mc) {
if (!leftover) {
if (uci->mc)
vfree(uci->mc);
uci->mc = (struct microcode_intel *)new_mc;
pr_debug("microcode: CPU%d found a matching microcode update with"
" version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);
} else
vfree(new_mc);
if (!new_mc)
goto out;
if (leftover) {
vfree(new_mc);
goto out;
}
if (uci->mc)
vfree(uci->mc);
uci->mc = (struct microcode_intel *)new_mc;
pr_debug("microcode: CPU%d found a matching microcode update with"
" version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);
out:
return (int)leftover;
}
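
For orientation, the size macros earlier in this file work out as follows (assuming the 48-byte struct microcode_header_intel of this era, i.e. twelve u32 fields):

	MC_HEADER_SIZE          = sizeof(struct microcode_header_intel) = 48
	DEFAULT_UCODE_DATASIZE  = 2000
	DEFAULT_UCODE_TOTALSIZE = 2000 + 48 = 2048

which is why get_totalsize()/get_datasize() fall back to these constants when the header's totalsize field is zero (the legacy fixed-size format).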


@@ -226,7 +226,7 @@ static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d)
return 0;
}
static struct dmi_system_id __devinitdata mmconf_dmi_table[] = {
static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
{
.callback = set_check_enable_amd_mmconf,
.ident = "Sun Microsystems Machine",


@@ -109,9 +109,6 @@ static void __init MP_bus_info(struct mpc_bus *m)
} else
printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
}
#endif
#ifdef CONFIG_X86_IO_APIC
static int bad_ioapic(unsigned long address)
{
@@ -224,8 +221,12 @@ static void __init MP_intsrc_info(struct mpc_intsrc *m)
if (++mp_irq_entries == MAX_IRQ_SOURCES)
panic("Max # of irq sources exceeded!!\n");
}
#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
static inline void __init MP_intsrc_info(struct mpc_intsrc *m) {}
#endif /* CONFIG_X86_IO_APIC */
#endif
static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
@@ -275,6 +276,20 @@ static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
return 1;
}
static void skip_entry(unsigned char **ptr, int *count, int size)
{
*ptr += size;
*count += size;
}
static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n"
"type %x\n", *mpt);
print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
1, mpc, mpc->length, 1);
}
static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
char str[16];
@@ -310,61 +325,30 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
while (count < mpc->length) {
switch (*mpt) {
case MP_PROCESSOR:
{
struct mpc_cpu *m = (struct mpc_cpu *)mpt;
/* ACPI may have already provided this data */
if (!acpi_lapic)
MP_processor_info(m);
mpt += sizeof(*m);
count += sizeof(*m);
break;
}
/* ACPI may have already provided this data */
if (!acpi_lapic)
MP_processor_info((struct mpc_cpu *)mpt);
skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
break;
case MP_BUS:
{
struct mpc_bus *m = (struct mpc_bus *)mpt;
#ifdef CONFIG_X86_IO_APIC
MP_bus_info(m);
#endif
mpt += sizeof(*m);
count += sizeof(*m);
break;
}
MP_bus_info((struct mpc_bus *)mpt);
skip_entry(&mpt, &count, sizeof(struct mpc_bus));
break;
case MP_IOAPIC:
{
#ifdef CONFIG_X86_IO_APIC
struct mpc_ioapic *m = (struct mpc_ioapic *)mpt;
MP_ioapic_info(m);
#endif
mpt += sizeof(struct mpc_ioapic);
count += sizeof(struct mpc_ioapic);
break;
}
MP_ioapic_info((struct mpc_ioapic *)mpt);
skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
break;
case MP_INTSRC:
{
#ifdef CONFIG_X86_IO_APIC
struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
MP_intsrc_info(m);
#endif
mpt += sizeof(struct mpc_intsrc);
count += sizeof(struct mpc_intsrc);
break;
}
MP_intsrc_info((struct mpc_intsrc *)mpt);
skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
break;
case MP_LINTSRC:
{
struct mpc_lintsrc *m =
(struct mpc_lintsrc *)mpt;
MP_lintsrc_info(m);
mpt += sizeof(*m);
count += sizeof(*m);
break;
}
MP_lintsrc_info((struct mpc_lintsrc *)mpt);
skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
break;
default:
/* wrong mptable */
printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n");
printk(KERN_ERR "type %x\n", *mpt);
print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
1, mpc, mpc->length, 1);
smp_dump_mptable(mpc, mpt);
count = mpc->length;
break;
}
@@ -571,6 +555,55 @@ static unsigned long __init get_mpc_size(unsigned long physptr)
return size;
}
static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
struct mpc_table *mpc;
unsigned long size;
size = get_mpc_size(mpf->physptr);
mpc = early_ioremap(mpf->physptr, size);
/*
* Read the physical hardware table. Anything here will
* override the defaults.
*/
if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
smp_found_config = 0;
#endif
printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"
"... disabling SMP support. (tell your hw vendor)\n");
early_iounmap(mpc, size);
return -1;
}
early_iounmap(mpc, size);
if (early)
return -1;
#ifdef CONFIG_X86_IO_APIC
/*
* If there are no explicit MP IRQ entries, then we are
* broken. We set up most of the low 16 IO-APIC pins to
* ISA defaults and hope it will work.
*/
if (!mp_irq_entries) {
struct mpc_bus bus;
printk(KERN_ERR "BIOS bug, no explicit IRQ entries, "
"using default mptable. (tell your hw vendor)\n");
bus.type = MP_BUS;
bus.busid = 0;
memcpy(bus.bustype, "ISA ", 6);
MP_bus_info(&bus);
construct_default_ioirq_mptable(0);
}
#endif
return 0;
}
/*
* Scan the memory blocks for an SMP configuration block.
*/
@@ -624,51 +657,8 @@ static void __init __get_smp_config(unsigned int early)
construct_default_ISA_mptable(mpf->feature1);
} else if (mpf->physptr) {
struct mpc_table *mpc;
unsigned long size;
size = get_mpc_size(mpf->physptr);
mpc = early_ioremap(mpf->physptr, size);
/*
* Read the physical hardware table. Anything here will
* override the defaults.
*/
if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
smp_found_config = 0;
#endif
printk(KERN_ERR
"BIOS bug, MP table errors detected!...\n");
printk(KERN_ERR "... disabling SMP support. "
"(tell your hw vendor)\n");
early_iounmap(mpc, size);
if (check_physptr(mpf, early))
return;
}
early_iounmap(mpc, size);
if (early)
return;
#ifdef CONFIG_X86_IO_APIC
/*
* If there are no explicit MP IRQ entries, then we are
* broken. We set up most of the low 16 IO-APIC pins to
* ISA defaults and hope it will work.
*/
if (!mp_irq_entries) {
struct mpc_bus bus;
printk(KERN_ERR "BIOS bug, no explicit IRQ entries, "
"using default mptable. "
"(tell your hw vendor)\n");
bus.type = MP_BUS;
bus.busid = 0;
memcpy(bus.bustype, "ISA ", 6);
MP_bus_info(&bus);
construct_default_ioirq_mptable(0);
}
#endif
} else
BUG();
@@ -689,6 +679,31 @@ void __init get_smp_config(void)
__get_smp_config(0);
}
static void smp_reserve_bootmem(struct mpf_intel *mpf)
{
unsigned long size = get_mpc_size(mpf->physptr);
#ifdef CONFIG_X86_32
/*
* We cannot access the MPC table to compute its size yet,
* as only a few megabytes from the bottom are mapped now.
* The PC-9800's MPC table sits at the very end of physical
* memory, so simply reserving PAGE_SIZE from mpf->physptr
* would trigger BUG() in reserve_bootmem.
* We also need to make sure physptr is below max_low_pfn;
* there is no need to reserve the area above max_low_pfn.
*/
unsigned long end = max_low_pfn * PAGE_SIZE;
if (mpf->physptr < end) {
if (mpf->physptr + size > end)
size = end - mpf->physptr;
reserve_bootmem_generic(mpf->physptr, size, BOOTMEM_DEFAULT);
}
#else
reserve_bootmem_generic(mpf->physptr, size, BOOTMEM_DEFAULT);
#endif
}
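
The clamp above in numbers (all values assumed): with 896 MB of lowmem, end = 0x38000000. An MP table at physptr = 0x37fffe00 with size = 0x400 crosses the boundary, so size is clamped to end - physptr = 0x200 and only the in-range half is reserved; a table whose physptr is at or above end is not reserved at all.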
static int __init smp_scan_config(unsigned long base, unsigned long length,
unsigned reserve)
{
@@ -717,35 +732,9 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
if (!reserve)
return 1;
reserve_bootmem_generic(virt_to_phys(mpf), sizeof(*mpf),
BOOTMEM_DEFAULT);
if (mpf->physptr) {
unsigned long size = get_mpc_size(mpf->physptr);
#ifdef CONFIG_X86_32
/*
* We cannot access to MPC table to compute
* table size yet, as only few megabytes from
* the bottom is mapped now.
* PC-9800's MPC table places on the very last
* of physical memory; so that simply reserving
* PAGE_SIZE from mpf->physptr yields BUG()
* in reserve_bootmem.
* also need to make sure physptr is below than
* max_low_pfn
* we don't need reserve the area above max_low_pfn
*/
unsigned long end = max_low_pfn * PAGE_SIZE;
if (mpf->physptr < end) {
if (mpf->physptr + size > end)
size = end - mpf->physptr;
reserve_bootmem_generic(mpf->physptr, size,
BOOTMEM_DEFAULT);
}
#else
reserve_bootmem_generic(mpf->physptr, size,
BOOTMEM_DEFAULT);
#endif
}
if (mpf->physptr)
smp_reserve_bootmem(mpf);
return 1;
}
@@ -848,7 +837,57 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
#define SPARE_SLOT_NUM 20
static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
#endif
static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
int i;
apic_printk(APIC_VERBOSE, "OLD ");
print_MP_intsrc_info(m);
i = get_MP_intsrc_index(m);
if (i > 0) {
assign_to_mpc_intsrc(&mp_irqs[i], m);
apic_printk(APIC_VERBOSE, "NEW ");
print_mp_irq_info(&mp_irqs[i]);
return;
}
if (!i) {
/* legacy, do nothing */
return;
}
if (*nr_m_spare < SPARE_SLOT_NUM) {
/*
* not found (-1) or duplicated (-2) entries are invalid;
* park this one in a spare slot so it can be reused later
*/
m_spare[*nr_m_spare] = m;
*nr_m_spare += 1;
}
}
#else /* CONFIG_X86_IO_APIC */
static inline void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */
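check_irq_src() folds the result of get_MP_intsrc_index() into a single pass; from the comments above, the return convention appears to be:
        i > 0   existing mp_irqs[] slot: overwrite it via assign_to_mpc_intsrc()
        i == 0  legacy entry: leave it alone
        i < 0   not found (-1) or duplicate (-2): park the entry in m_spare[]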
static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length,
int count)
{
if (!mpc_new_phys) {
pr_info("No spare slots, try to append...take your risk, "
"new mpc_length %x\n", count);
} else {
if (count <= mpc_new_length)
pr_info("No spare slots, try to append..., "
"new mpc_length %x\n", count);
else {
pr_err("mpc_new_length %lx is too small\n",
mpc_new_length);
return -1;
}
}
return 0;
}
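check_slot() returns 0 when the appended entry still fits and -1 when mpc_new_length would be exceeded, so callers must test for a negative result, as in the replace_intsrc_all() hunk further down:
        count += sizeof(struct mpc_intsrc);
        if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
                goto out;               /* no room left to append */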
static int __init replace_intsrc_all(struct mpc_table *mpc,
unsigned long mpc_new_phys,
@@ -856,77 +895,33 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
{
#ifdef CONFIG_X86_IO_APIC
int i;
int nr_m_spare = 0;
#endif
int count = sizeof(*mpc);
int nr_m_spare = 0;
unsigned char *mpt = ((unsigned char *)mpc) + count;
printk(KERN_INFO "mpc_length %x\n", mpc->length);
while (count < mpc->length) {
switch (*mpt) {
case MP_PROCESSOR:
{
struct mpc_cpu *m = (struct mpc_cpu *)mpt;
mpt += sizeof(*m);
count += sizeof(*m);
break;
}
skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
break;
case MP_BUS:
{
struct mpc_bus *m = (struct mpc_bus *)mpt;
mpt += sizeof(*m);
count += sizeof(*m);
break;
}
skip_entry(&mpt, &count, sizeof(struct mpc_bus));
break;
case MP_IOAPIC:
{
mpt += sizeof(struct mpc_ioapic);
count += sizeof(struct mpc_ioapic);
break;
}
skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
break;
case MP_INTSRC:
{
#ifdef CONFIG_X86_IO_APIC
struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
printk(KERN_INFO "OLD ");
print_MP_intsrc_info(m);
i = get_MP_intsrc_index(m);
if (i > 0) {
assign_to_mpc_intsrc(&mp_irqs[i], m);
printk(KERN_INFO "NEW ");
print_mp_irq_info(&mp_irqs[i]);
} else if (!i) {
/* legacy, do nothing */
} else if (nr_m_spare < SPARE_SLOT_NUM) {
/*
* not found (-1), or duplicated (-2)
* are invalid entries,
* we need to use the slot later
*/
m_spare[nr_m_spare] = m;
nr_m_spare++;
}
#endif
mpt += sizeof(struct mpc_intsrc);
count += sizeof(struct mpc_intsrc);
break;
}
check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
break;
case MP_LINTSRC:
{
struct mpc_lintsrc *m =
(struct mpc_lintsrc *)mpt;
mpt += sizeof(*m);
count += sizeof(*m);
break;
}
skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
break;
default:
/* wrong mptable */
printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n");
printk(KERN_ERR "type %x\n", *mpt);
print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
1, mpc, mpc->length, 1);
smp_dump_mptable(mpc, mpt);
goto out;
}
}
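Every case above now funnels through skip_entry(), which is not shown in this hunk; presumably it does nothing more than advance the traversal pointer and the running byte count:
        static void skip_entry(unsigned char **ptr, int *count, int size)
        {
                *ptr += size;
                *count += size;
        }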
@@ -943,23 +938,15 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
continue;
if (nr_m_spare > 0) {
printk(KERN_INFO "*NEW* found ");
apic_printk(APIC_VERBOSE, "*NEW* found\n");
nr_m_spare--;
assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]);
m_spare[nr_m_spare] = NULL;
} else {
struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
count += sizeof(struct mpc_intsrc);
if (!mpc_new_phys) {
printk(KERN_INFO "No spare slots, try to append...take your risk, new mpc_length %x\n", count);
} else {
if (count <= mpc_new_length)
printk(KERN_INFO "No spare slots, try to append..., new mpc_length %x\n", count);
else {
printk(KERN_ERR "mpc_new_length %lx is too small\n", mpc_new_length);
goto out;
}
}
if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
goto out;
assign_to_mpc_intsrc(&mp_irqs[i], m);
mpc->length = count;
mpt += sizeof(struct mpc_intsrc);

@@ -470,7 +470,6 @@ struct pv_mmu_ops pv_mmu_ops = {
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
.set_pte_atomic = native_set_pte_atomic,
.set_pte_present = native_set_pte_present,
.pte_clear = native_pte_clear,
.pmd_clear = native_pmd_clear,
#endif

@@ -380,8 +380,9 @@ static inline struct iommu_table *find_iommu_table(struct device *dev)
return tbl;
}
static void calgary_unmap_sg(struct device *dev,
struct scatterlist *sglist, int nelems, int direction)
static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct iommu_table *tbl = find_iommu_table(dev);
struct scatterlist *s;
@@ -404,7 +405,8 @@ static void calgary_unmap_sg(struct device *dev,
}
static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
int nelems, int direction)
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct iommu_table *tbl = find_iommu_table(dev);
struct scatterlist *s;
@@ -429,15 +431,14 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
s->dma_address = (entry << PAGE_SHIFT) | s->offset;
/* insert into HW table */
tce_build(tbl, entry, npages, vaddr & PAGE_MASK,
direction);
tce_build(tbl, entry, npages, vaddr & PAGE_MASK, dir);
s->dma_length = s->length;
}
return nelems;
error:
calgary_unmap_sg(dev, sg, nelems, direction);
calgary_unmap_sg(dev, sg, nelems, dir, NULL);
for_each_sg(sg, s, nelems, i) {
sg->dma_address = bad_dma_address;
sg->dma_length = 0;
@@ -445,10 +446,12 @@ error:
return 0;
}
static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
size_t size, int direction)
static dma_addr_t calgary_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
void *vaddr = phys_to_virt(paddr);
void *vaddr = page_address(page) + offset;
unsigned long uaddr;
unsigned int npages;
struct iommu_table *tbl = find_iommu_table(dev);
@@ -456,17 +459,18 @@ static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
uaddr = (unsigned long)vaddr;
npages = iommu_num_pages(uaddr, size, PAGE_SIZE);
return iommu_alloc(dev, tbl, vaddr, npages, direction);
return iommu_alloc(dev, tbl, vaddr, npages, dir);
}
static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
size_t size, int direction)
static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct iommu_table *tbl = find_iommu_table(dev);
unsigned int npages;
npages = iommu_num_pages(dma_handle, size, PAGE_SIZE);
iommu_free(tbl, dma_handle, npages);
npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
iommu_free(tbl, dma_addr, npages);
}
static void* calgary_alloc_coherent(struct device *dev, size_t size,
@@ -515,13 +519,13 @@ static void calgary_free_coherent(struct device *dev, size_t size,
free_pages((unsigned long)vaddr, get_order(size));
}
static struct dma_mapping_ops calgary_dma_ops = {
static struct dma_map_ops calgary_dma_ops = {
.alloc_coherent = calgary_alloc_coherent,
.free_coherent = calgary_free_coherent,
.map_single = calgary_map_single,
.unmap_single = calgary_unmap_single,
.map_sg = calgary_map_sg,
.unmap_sg = calgary_unmap_sg,
.map_page = calgary_map_page,
.unmap_page = calgary_unmap_page,
};
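With .map_single/.unmap_single gone from the ops table, single-buffer mappings are expected to reach the driver through .map_page. A sketch of the generic wrapper this conversion assumes, simplified and without error handling:
        static inline dma_addr_t map_single_via_page(struct device *dev,
                                                     void *vaddr, size_t size,
                                                     enum dma_data_direction dir)
        {
                struct dma_map_ops *ops = get_dma_ops(dev);

                return ops->map_page(dev, virt_to_page(vaddr),
                                     offset_in_page(vaddr), size, dir, NULL);
        }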
static inline void __iomem * busno_to_bbar(unsigned char num)

@@ -1,4 +1,5 @@
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
@@ -12,7 +13,7 @@
static int forbid_dac __read_mostly;
struct dma_mapping_ops *dma_ops;
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
static int iommu_sac_force __read_mostly;
@@ -44,6 +45,9 @@ struct device x86_dma_fallback_dev = {
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES 32768
int dma_set_mask(struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
@@ -224,7 +228,7 @@ early_param("iommu", iommu_setup);
int dma_supported(struct device *dev, u64 mask)
{
struct dma_mapping_ops *ops = get_dma_ops(dev);
struct dma_map_ops *ops = get_dma_ops(dev);
#ifdef CONFIG_PCI
if (mask > 0xffffffff && forbid_dac > 0) {
@@ -265,6 +269,12 @@ EXPORT_SYMBOL(dma_supported);
static int __init pci_iommu_init(void)
{
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
dma_debug_add_bus(&pci_bus_type);
#endif
calgary_iommu_init();
intel_iommu_init();
@@ -290,8 +300,7 @@ fs_initcall(pci_iommu_init);
static __devinit void via_no_dac(struct pci_dev *dev)
{
if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
printk(KERN_INFO
"PCI: VIA PCI bridge detected. Disabling DAC.\n");
dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
forbid_dac = 1;
}
}
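dev_info() prefixes the message with the bus and device name, so the quirk now identifies the exact bridge it fires on; the output would look roughly like this (hypothetical device address):
        pci 0000:00:01.0: disabling DAC on VIA PCI bridge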

@@ -255,10 +255,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
}
/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
unsigned long bus;
phys_addr_t paddr = page_to_phys(page) + offset;
if (!dev)
dev = &x86_dma_fallback_dev;
@@ -275,8 +278,9 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
/*
* Free a DMA mapping.
*/
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, int direction)
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
unsigned long iommu_page;
int npages;
@@ -298,8 +302,8 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
/*
* Wrapper for pci_unmap_single working with scatterlists.
*/
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct scatterlist *s;
int i;
@@ -307,7 +311,7 @@ gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
for_each_sg(sg, s, nents, i) {
if (!s->dma_length || !s->length)
break;
gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
}
}
@@ -329,7 +333,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
addr = dma_map_area(dev, addr, s->length, dir, 0);
if (addr == bad_dma_address) {
if (i > 0)
gart_unmap_sg(dev, sg, i, dir);
gart_unmap_sg(dev, sg, i, dir, NULL);
nents = 0;
sg[0].dma_length = 0;
break;
@@ -400,8 +404,8 @@ dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
* DMA map all entries in a scatterlist.
* Merge chunks that have page aligned sizes into a continuous mapping.
*/
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct scatterlist *s, *ps, *start_sg, *sgmap;
int need = 0, nextneed, i, out, start;
@@ -468,7 +472,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
error:
flush_gart();
gart_unmap_sg(dev, sg, out, dir);
gart_unmap_sg(dev, sg, out, dir, NULL);
/* When it was forced or merged try again in a dumb way */
if (force_iommu || iommu_merge) {
@@ -521,7 +525,7 @@ static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_addr)
{
gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
free_pages((unsigned long)vaddr, get_order(size));
}
@@ -707,11 +711,11 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
return -1;
}
static struct dma_mapping_ops gart_dma_ops = {
.map_single = gart_map_single,
.unmap_single = gart_unmap_single,
static struct dma_map_ops gart_dma_ops = {
.map_sg = gart_map_sg,
.unmap_sg = gart_unmap_sg,
.map_page = gart_map_page,
.unmap_page = gart_unmap_page,
.alloc_coherent = gart_alloc_coherent,
.free_coherent = gart_free_coherent,
};

@@ -1,14 +1,14 @@
/* Fallback functions when the main IOMMU code is not compiled in. This
code is roughly equivalent to i386. */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <asm/iommu.h>
#include <asm/processor.h>
#include <asm/iommu.h>
#include <asm/dma.h>
static int
@@ -25,19 +25,19 @@ check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
return 1;
}
static dma_addr_t
nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
int direction)
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
dma_addr_t bus = paddr;
dma_addr_t bus = page_to_phys(page) + offset;
WARN_ON(size == 0);
if (!check_addr("map_single", hwdev, bus, size))
return bad_dma_address;
if (!check_addr("map_single", dev, bus, size))
return bad_dma_address;
flush_write_buffers();
return bus;
}
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the
* above pci_map_single interface. Here the scatter gather list
@@ -54,7 +54,8 @@ nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
* the same here.
*/
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, int direction)
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct scatterlist *s;
int i;
@@ -78,12 +79,12 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
free_pages((unsigned long)vaddr, get_order(size));
}
struct dma_mapping_ops nommu_dma_ops = {
.alloc_coherent = dma_generic_alloc_coherent,
.free_coherent = nommu_free_coherent,
.map_single = nommu_map_single,
.map_sg = nommu_map_sg,
.is_phys = 1,
struct dma_map_ops nommu_dma_ops = {
.alloc_coherent = dma_generic_alloc_coherent,
.free_coherent = nommu_free_coherent,
.map_sg = nommu_map_sg,
.map_page = nommu_map_page,
.is_phys = 1,
};
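On the nommu path the "mapping" is the identity transform: the DMA handle is simply the physical address of the buffer. For a hypothetical page at physical 0x10000000 mapped with offset 0x80:
        dma_addr_t bus = page_to_phys(page) + offset;   /* 0x10000080 */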
void __init no_iommu_init(void)

@@ -33,18 +33,11 @@ phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
return baddr;
}
int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
{
return 0;
}
static dma_addr_t
swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
int direction)
{
return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
}
static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
@@ -57,20 +50,20 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
}
struct dma_mapping_ops swiotlb_dma_ops = {
struct dma_map_ops swiotlb_dma_ops = {
.mapping_error = swiotlb_dma_mapping_error,
.alloc_coherent = x86_swiotlb_alloc_coherent,
.free_coherent = swiotlb_free_coherent,
.map_single = swiotlb_map_single_phys,
.unmap_single = swiotlb_unmap_single,
.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
.sync_single_for_device = swiotlb_sync_single_for_device,
.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = swiotlb_sync_sg_for_device,
.map_sg = swiotlb_map_sg,
.unmap_sg = swiotlb_unmap_sg,
.map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
.dma_supported = NULL,
};
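Pointing .map_sg/.unmap_sg straight at the _attrs variants drops a layer of indirection; the old non-attrs entry points were presumably thin wrappers along the lines of:
        int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl,
                           int nelems, int dir)
        {
                return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
        }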

@@ -68,11 +68,11 @@ void exit_thread(void)
{
struct task_struct *me = current;
struct thread_struct *t = &me->thread;
unsigned long *bp = t->io_bitmap_ptr;
if (me->thread.io_bitmap_ptr) {
if (bp) {
struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
kfree(t->io_bitmap_ptr);
t->io_bitmap_ptr = NULL;
clear_thread_flag(TIF_IO_BITMAP);
/*
@@ -81,6 +81,7 @@ void exit_thread(void)
memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
t->io_bitmap_max = 0;
put_cpu();
kfree(bp);
}
ds_exit_thread(current);
@@ -327,7 +328,7 @@ void stop_this_cpu(void *dummy)
/*
* Remove this CPU:
*/
cpu_clear(smp_processor_id(), cpu_online_map);
set_cpu_online(smp_processor_id(), false);
disable_local_APIC();
for (;;) {
@@ -477,12 +478,13 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
return 1;
}
static cpumask_t c1e_mask = CPU_MASK_NONE;
static cpumask_var_t c1e_mask;
static int c1e_detected;
void c1e_remove_cpu(int cpu)
{
cpu_clear(cpu, c1e_mask);
if (c1e_mask != NULL)
cpumask_clear_cpu(cpu, c1e_mask);
}
/*
@@ -511,8 +513,8 @@ static void c1e_idle(void)
if (c1e_detected) {
int cpu = smp_processor_id();
if (!cpu_isset(cpu, c1e_mask)) {
cpu_set(cpu, c1e_mask);
if (!cpumask_test_cpu(cpu, c1e_mask)) {
cpumask_set_cpu(cpu, c1e_mask);
/*
* Force broadcast so ACPI can not interfere. Needs
* to run with interrupts enabled as it uses
@@ -564,6 +566,15 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
pm_idle = default_idle;
}
void __init init_c1e_mask(void)
{
/* If we're using c1e_idle, we need to allocate c1e_mask. */
if (pm_idle == c1e_idle) {
alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
cpumask_clear(c1e_mask);
}
}
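The cpumask_var_t conversion matters when CONFIG_CPUMASK_OFFSTACK=y: the mask is then heap-allocated and stays NULL until init_c1e_mask() runs, which is why c1e_remove_cpu() checks it before use. The general pattern, with hypothetical names:
        static cpumask_var_t demo_mask;         /* hypothetical */

        static int __init demo_init(void)
        {
                if (!alloc_cpumask_var(&demo_mask, GFP_KERNEL))
                        return -ENOMEM;         /* allocation can fail off-stack */
                cpumask_clear(demo_mask);       /* alloc_cpumask_var() does not zero */
                return 0;
        }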
static int __init idle_setup(char *str)
{
if (!str)

@@ -245,7 +245,7 @@ void prepare_to_copy(struct task_struct *tsk)
unlazy_fpu(tsk);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
int copy_thread(unsigned long clone_flags, unsigned long sp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{

@@ -278,7 +278,7 @@ void prepare_to_copy(struct task_struct *tsk)
unlazy_fpu(tsk);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
int copy_thread(unsigned long clone_flags, unsigned long sp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{

@@ -686,9 +686,8 @@ static int ptrace_bts_config(struct task_struct *child,
if (!cfg.signal)
return -EINVAL;
return -EOPNOTSUPP;
child->thread.bts_ovfl_signal = cfg.signal;
return -EOPNOTSUPP;
}
if ((cfg.flags & PTRACE_BTS_O_ALLOC) &&
@@ -1463,6 +1462,6 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
* system call instruction.
*/
if (test_thread_flag(TIF_SINGLESTEP) &&
tracehook_consider_fatal_signal(current, SIGTRAP, SIG_DFL))
tracehook_consider_fatal_signal(current, SIGTRAP))
send_sigtrap(current, regs, 0, TRAP_BRKPT);
}

@@ -74,8 +74,7 @@ static void ich_force_hpet_resume(void)
if (!force_hpet_address)
return;
if (rcba_base == NULL)
BUG();
BUG_ON(rcba_base == NULL);
/* read the Function Disable register, dword mode only */
val = readl(rcba_base + 0x3404);
@@ -172,7 +171,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16, /* ICH10 */
ich_force_enable_hpet);
static struct pci_dev *cached_dev;
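Collapsing the if/BUG() pair into BUG_ON() is the idiomatic form and keeps the asserted condition next to the trap. The general shape of the transformation:
        if (rcba_base == NULL)
                BUG();
        /* becomes */
        BUG_ON(rcba_base == NULL);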

@@ -17,7 +17,8 @@
#define PTR(x) (x << 2)
/* control_page + KEXEC_CONTROL_CODE_MAX_SIZE
/*
* control_page + KEXEC_CONTROL_CODE_MAX_SIZE
* ~ control_page + PAGE_SIZE are used as data storage and stack for
* jumping back
*/
@@ -76,8 +77,10 @@ relocate_kernel:
movl %eax, CP_PA_SWAP_PAGE(%edi)
movl %ebx, CP_PA_BACKUP_PAGES_MAP(%edi)
/* get physical address of control page now */
/* this is impossible after page table switch */
/*
* get physical address of control page now
* this is impossible after page table switch
*/
movl PTR(PA_CONTROL_PAGE)(%ebp), %edi
/* switch to new set of page tables */
@@ -97,7 +100,8 @@ identity_mapped:
/* store the start address on the stack */
pushl %edx
/* Set cr0 to a known state:
/*
* Set cr0 to a known state:
* - Paging disabled
* - Alignment check disabled
* - Write protect disabled
@@ -113,7 +117,8 @@ identity_mapped:
/* clear cr4 if applicable */
testl %ecx, %ecx
jz 1f
/* Set cr4 to a known state:
/*
* Set cr4 to a known state:
* Setting everything to zero seems safe.
*/
xorl %eax, %eax
@@ -132,15 +137,18 @@ identity_mapped:
call swap_pages
addl $8, %esp
/* To be certain of avoiding problems with self-modifying code
/*
* To be certain of avoiding problems with self-modifying code
* I need to execute a serializing instruction here.
* So I flush the TLB, it's handy, and not processor dependent.
*/
xorl %eax, %eax
movl %eax, %cr3
/* set all of the registers to known values */
/* leave %esp alone */
/*
* set all of the registers to known values
* leave %esp alone
*/
testl %esi, %esi
jnz 1f
