Merge branches 'irq/sparseirq', 'irq/genirq' and 'irq/urgent'; commit 'v2.6.28' into irq/core
@@ -41,7 +41,7 @@ obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
 obj-y                           += process.o
 obj-y                           += i387.o xsave.o
 obj-y                           += ptrace.o
-obj-y                           += ds.o
+obj-$(CONFIG_X86_DS)            += ds.o
 obj-$(CONFIG_X86_32)            += tls.o
 obj-$(CONFIG_IA32_EMULATION)    += tls.o
 obj-y                           += step.o

@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 
         spin_lock_irqsave(&iommu->lock, flags);
         ret = __iommu_queue_command(iommu, cmd);
+        if (!ret)
+                iommu->need_sync = 1;
         spin_unlock_irqrestore(&iommu->lock, flags);
 
         return ret;

@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
         cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
         CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
-        iommu->need_sync = 0;
-
         spin_lock_irqsave(&iommu->lock, flags);
 
+        if (!iommu->need_sync)
+                goto out;
+
+        iommu->need_sync = 0;
+
         ret = __iommu_queue_command(iommu, &cmd);
 
         if (ret)

@@ -230,8 +235,9 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
         status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
         writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-        if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
-                printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
+        if (unlikely(i == EXIT_LOOP_COUNT))
+                panic("AMD IOMMU: Completion wait loop failed\n");
 
+out:
         spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -254,8 +260,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 
         ret = iommu_queue_command(iommu, &cmd);
 
-        iommu->need_sync = 1;
-
         return ret;
 }
 
@@ -281,8 +285,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 
         ret = iommu_queue_command(iommu, &cmd);
 
-        iommu->need_sync = 1;
-
         return ret;
 }
 
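The need_sync hunks above all implement one change: the flag now lives entirely under iommu->lock. Queueing a command sets it at the single point where a command actually enters the ring (instead of at every call site), and iommu_completion_wait() re-checks and clears it only after taking the same lock, closing the window in which a concurrently queued command could be missed. This is also why later hunks in this merge drop the unlocked "if (unlikely(iommu->need_sync))" test at the DMA-API call sites: the check now happens race-free inside iommu_completion_wait() itself. A minimal userspace model of the pattern (pthread mutex standing in for the kernel spinlock; all names hypothetical):

    #include <pthread.h>
    #include <stdbool.h>

    struct fake_iommu {
            pthread_mutex_t lock;   /* models iommu->lock */
            bool need_sync;         /* models iommu->need_sync */
    };

    /* models iommu_queue_command(): mark sync needed at the one
     * place a command really enters the ring, under the lock */
    static int queue_command(struct fake_iommu *iommu)
    {
            pthread_mutex_lock(&iommu->lock);
            /* ... put the command into the ring here ... */
            iommu->need_sync = true;
            pthread_mutex_unlock(&iommu->lock);
            return 0;
    }

    /* models iommu_completion_wait(): test-and-clear the flag only
     * while holding the same lock, so no queued command is missed */
    static void completion_wait(struct fake_iommu *iommu)
    {
            pthread_mutex_lock(&iommu->lock);
            if (!iommu->need_sync)
                    goto out;
            iommu->need_sync = false;
            /* ... queue a COMPL_WAIT command and poll for it ... */
    out:
            pthread_mutex_unlock(&iommu->lock);
    }
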
@@ -343,7 +345,7 @@ static int iommu_map(struct protection_domain *dom,
         u64 __pte, *pte, *page;
 
         bus_addr  = PAGE_ALIGN(bus_addr);
-        phys_addr = PAGE_ALIGN(bus_addr);
+        phys_addr = PAGE_ALIGN(phys_addr);
 
         /* only support 512GB address spaces for now */
         if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))

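The one-character fix above matters because the old line derived phys_addr from bus_addr, so the two sides of the mapping could end up on different pages. For reference, a self-contained illustration of the macro's effect, assuming the usual round-up definition and 4 KiB pages:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))
    /* round up to the next page boundary */
    #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
            uint64_t bus_addr  = 0x1234;
            uint64_t phys_addr = 0x9abc;

            /* the bug: phys_addr silently inherited bus_addr's page */
            assert(PAGE_ALIGN(bus_addr)  == 0x2000);
            /* the fix: each address is aligned from its own value */
            assert(PAGE_ALIGN(phys_addr) == 0xa000);
            return 0;
    }
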
@@ -599,7 +601,7 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
                         continue;
 
                 p2 = IOMMU_PTE_PAGE(p1[i]);
-                for (j = 0; j < 512; ++i) {
+                for (j = 0; j < 512; ++j) {
                         if (!IOMMU_PTE_PRESENT(p2[j]))
                                 continue;
                         p3 = IOMMU_PTE_PAGE(p2[j]);

@@ -762,8 +764,6 @@ static void set_device_domain(struct amd_iommu *iommu,
         write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
         iommu_queue_inv_dev_entry(iommu, devid);
-
-        iommu->need_sync = 1;
 }
 
 /*****************************************************************************

@@ -858,6 +858,9 @@ static int get_device_resources(struct device *dev,
                 print_devid(_bdf, 1);
         }
 
+        if (domain_for_device(_bdf) == NULL)
+                set_device_domain(*iommu, *domain, _bdf);
+
         return 1;
 }
 
@@ -908,7 +911,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
         if (address >= dom->aperture_size)
                 return;
 
-        WARN_ON(address & 0xfffULL || address > dom->aperture_size);
+        WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
 
         pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
         pte += IOMMU_PTE_L0_INDEX(address);

@@ -920,8 +923,8 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 
 /*
  * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is uses by all
- * mapping functions provided by this IOMMU driver.
+ * contiguous memory region into DMA address space. It is used by all
+ * mapping functions provided with this IOMMU driver.
  * Must be called with the domain lock held.
  */
 static dma_addr_t __map_single(struct device *dev,

@@ -981,7 +984,8 @@ static void __unmap_single(struct amd_iommu *iommu,
         dma_addr_t i, start;
         unsigned int pages;
 
-        if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
+        if ((dma_addr == bad_dma_address) ||
+            (dma_addr + size > dma_dom->aperture_size))
                 return;
 
         pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

@@ -1031,8 +1035,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
         if (addr == bad_dma_address)
                 goto out;
 
-        if (unlikely(iommu->need_sync))
-                iommu_completion_wait(iommu);
+        iommu_completion_wait(iommu);
 
 out:
         spin_unlock_irqrestore(&domain->lock, flags);

@@ -1060,8 +1063,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
         __unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-        if (unlikely(iommu->need_sync))
-                iommu_completion_wait(iommu);
+        iommu_completion_wait(iommu);
 
         spin_unlock_irqrestore(&domain->lock, flags);
 }

@@ -1127,8 +1129,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
                         goto unmap;
         }
 
-        if (unlikely(iommu->need_sync))
-                iommu_completion_wait(iommu);
+        iommu_completion_wait(iommu);
 
 out:
         spin_unlock_irqrestore(&domain->lock, flags);

@@ -1173,8 +1174,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
                 s->dma_address = s->dma_length = 0;
         }
 
-        if (unlikely(iommu->need_sync))
-                iommu_completion_wait(iommu);
+        iommu_completion_wait(iommu);
 
         spin_unlock_irqrestore(&domain->lock, flags);
 }

@@ -1225,8 +1225,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
                 goto out;
         }
 
-        if (unlikely(iommu->need_sync))
-                iommu_completion_wait(iommu);
+        iommu_completion_wait(iommu);
 
 out:
         spin_unlock_irqrestore(&domain->lock, flags);

@@ -1257,8 +1256,7 @@ static void free_coherent(struct device *dev, size_t size,
 
         __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-        if (unlikely(iommu->need_sync))
-                iommu_completion_wait(iommu);
+        iommu_completion_wait(iommu);
 
         spin_unlock_irqrestore(&domain->lock, flags);
 
@@ -427,6 +427,10 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
         memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                         &entry, sizeof(entry));
 
+        /* set head and tail to zero manually */
+        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+
         iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
 
         return cmd_buf;

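The added writel() calls zero the command ring's head and tail pointers explicitly rather than trusting their reset values: with head == tail the ring is empty, so the IOMMU cannot fetch stale commands when CMDBUF_EN is enabled. A rough model of that producer/consumer ring convention (names hypothetical):

    #include <stdint.h>

    /* software writes at tail, hardware fetches at head;
     * head == tail means the ring is empty */
    struct cmd_ring {
            uint32_t head;  /* consumer index (hardware) */
            uint32_t tail;  /* producer index (software) */
            uint32_t size;
    };

    static void ring_reset(struct cmd_ring *r, uint32_t size)
    {
            /* set head and tail to zero manually, as the hunk above
             * does via MMIO, so the ring starts out provably empty */
            r->head = 0;
            r->tail = 0;
            r->size = size;
    }

    static int ring_empty(const struct cmd_ring *r)
    {
            return r->head == r->tail;
    }
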
@@ -1074,7 +1078,8 @@ int __init amd_iommu_init(void)
                 goto free;
 
         /* IOMMU rlookup table - find the IOMMU for a specific device */
-        amd_iommu_rlookup_table = (void *)__get_free_pages(GFP_KERNEL,
+        amd_iommu_rlookup_table = (void *)__get_free_pages(
+                        GFP_KERNEL | __GFP_ZERO,
                         get_order(rlookup_table_size));
         if (amd_iommu_rlookup_table == NULL)
                 goto free;

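Adding __GFP_ZERO makes the page allocator hand back pre-zeroed pages, so every slot of the rlookup table starts out NULL without a separate memset pass. The userspace analogue of this choice is calloc() over malloc()+memset; a tiny sketch (table size hypothetical):

    #include <stdlib.h>
    #include <string.h>

    #define TABLE_ENTRIES 65536   /* e.g. one slot per PCI devid */

    /* analogue of __get_free_pages(GFP_KERNEL, order) + memset() */
    void **alloc_table_two_step(void)
    {
            void **t = malloc(TABLE_ENTRIES * sizeof(*t));
            if (t)
                    memset(t, 0, TABLE_ENTRIES * sizeof(*t));
            return t;
    }

    /* analogue of __get_free_pages(GFP_KERNEL | __GFP_ZERO, order) */
    void **alloc_table_zeroed(void)
    {
            return calloc(TABLE_ENTRIES, sizeof(void *));
    }
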
@@ -1315,7 +1315,7 @@ void enable_x2apic(void)
         }
 }
 
-void enable_IR_x2apic(void)
+void __init enable_IR_x2apic(void)
 {
 #ifdef CONFIG_INTR_REMAP
         int ret;

@@ -115,9 +115,20 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
         u32 i = 0;
 
         if (cpu_family == CPU_HW_PSTATE) {
-                rdmsr(MSR_PSTATE_STATUS, lo, hi);
-                i = lo & HW_PSTATE_MASK;
-                data->currpstate = i;
+                if (data->currpstate == HW_PSTATE_INVALID) {
+                        /* read (initial) hw pstate if not yet set */
+                        rdmsr(MSR_PSTATE_STATUS, lo, hi);
+                        i = lo & HW_PSTATE_MASK;
+
+                        /*
+                         * a workaround for family 11h erratum 311 might cause
+                         * an "out-of-range Pstate if the core is in Pstate-0
+                         */
+                        if (i >= data->numps)
+                                data->currpstate = HW_PSTATE_0;
+                        else
+                                data->currpstate = i;
+                }
                 return 0;
         }
         do {

@@ -1121,6 +1132,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
         }
 
         data->cpu = pol->cpu;
+        data->currpstate = HW_PSTATE_INVALID;
 
         if (powernow_k8_cpu_init_acpi(data)) {
                 /*

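Taken together, these two powernow-k8 hunks implement a lazy read with a sentinel: cpu_init stores HW_PSTATE_INVALID (0xff, outside the 3-bit hardware P-state range), and the query path reads the MSR only while the cached value is still the sentinel, clamping an erratum-311 out-of-range value to P-state 0. A compact model of the idea (the raw MSR value is passed in as a parameter; names hypothetical):

    #include <stdint.h>

    enum pstate { HW_PSTATE_INVALID = 0xff, HW_PSTATE_0 = 0 };

    #define HW_PSTATE_MASK 0x7   /* low bits of MSR_PSTATE_STATUS */

    struct k8_data {
            enum pstate currpstate;
            unsigned int numps;   /* number of valid P-states */
    };

    static void update_currpstate(struct k8_data *data, uint32_t msr_lo)
    {
            uint32_t i;

            if (data->currpstate != HW_PSTATE_INVALID)
                    return;                    /* already cached */

            i = msr_lo & HW_PSTATE_MASK;       /* current hw P-state */

            /* erratum-311 style workaround: an out-of-range value
             * is treated as P-state 0 instead of being trusted */
            data->currpstate = (i >= data->numps) ? HW_PSTATE_0 : i;
    }
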
@@ -5,6 +5,19 @@
  *  http://www.gnu.org/licenses/gpl.html
  */
 
+
+enum pstate {
+        HW_PSTATE_INVALID = 0xff,
+        HW_PSTATE_0 = 0,
+        HW_PSTATE_1 = 1,
+        HW_PSTATE_2 = 2,
+        HW_PSTATE_3 = 3,
+        HW_PSTATE_4 = 4,
+        HW_PSTATE_5 = 5,
+        HW_PSTATE_6 = 6,
+        HW_PSTATE_7 = 7,
+};
+
 struct powernow_k8_data {
         unsigned int cpu;
 
@@ -23,7 +36,9 @@ struct powernow_k8_data {
         u32 exttype; /* extended interface = 1 */
 
         /* keep track of the current fid / vid or pstate */
-        u32 currvid, currfid, currpstate;
+        u32 currvid;
+        u32 currfid;
+        enum pstate currpstate;
 
         /* the powernow_table includes all frequency and vid/fid pairings:
          * fid are the lower 8 bits of the index, vid are the upper 8 bits.

@@ -510,12 +510,9 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
-        static cpumask_t mce_cpus = CPU_MASK_NONE;
-
         mce_cpu_quirks(c);
 
         if (mce_dont_init ||
-            cpu_test_and_set(smp_processor_id(), mce_cpus) ||
             !mce_available(c))
                 return;
 
@@ -21,8 +21,6 @@
  */
 
-#ifdef CONFIG_X86_DS
-
 #include <asm/ds.h>
 
 #include <linux/errno.h>

@@ -211,14 +209,15 @@ static DEFINE_PER_CPU(struct ds_context *, system_context);
 static inline struct ds_context *ds_get_context(struct task_struct *task)
 {
         struct ds_context *context;
+        unsigned long irq;
 
-        spin_lock(&ds_lock);
+        spin_lock_irqsave(&ds_lock, irq);
 
         context = (task ? task->thread.ds_ctx : this_system_context);
         if (context)
                 context->count++;
 
-        spin_unlock(&ds_lock);
+        spin_unlock_irqrestore(&ds_lock, irq);
 
         return context;
 }

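All the ds_lock hunks in this merge swap plain spin_lock/spin_unlock for the irqsave/irqrestore variants: a plain spin_lock is deadlock-prone when the same lock can also be taken from interrupt context, because the handler can interrupt the lock holder on the same CPU and spin forever. The shape of the conversion, as a hedged kernel-style fragment (lock and counter names hypothetical; the real calls are the ones quoted in the hunks):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(ds_lock_example);  /* stands in for ds_lock */
    static int refcount_example;

    /* deadlock-prone if an interrupt handler can take the same lock:
     *
     *         spin_lock(&ds_lock_example);
     *         refcount_example++;
     *         spin_unlock(&ds_lock_example);
     *
     * the irqsave form disables local interrupts for the critical
     * section and restores the previous interrupt state afterwards: */
    static void get_ref(void)
    {
            unsigned long irq;

            spin_lock_irqsave(&ds_lock_example, irq);
            refcount_example++;
            spin_unlock_irqrestore(&ds_lock_example, irq);
    }
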
@@ -226,55 +225,46 @@ static inline struct ds_context *ds_get_context(struct task_struct *task)
 /*
  * Same as ds_get_context, but allocates the context and it's DS
  * structure, if necessary; returns NULL; if out of memory.
- *
- * pre: requires ds_lock to be held
  */
 static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 {
         struct ds_context **p_context =
                 (task ? &task->thread.ds_ctx : &this_system_context);
         struct ds_context *context = *p_context;
+        unsigned long irq;
 
         if (!context) {
-                spin_unlock(&ds_lock);
-
                 context = kzalloc(sizeof(*context), GFP_KERNEL);
-
-                if (!context) {
-                        spin_lock(&ds_lock);
+                if (!context)
                         return NULL;
-                }
 
                 context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
                 if (!context->ds) {
                         kfree(context);
-                        spin_lock(&ds_lock);
                         return NULL;
                 }
 
-                spin_lock(&ds_lock);
-                /*
-                 * Check for race - another CPU could have allocated
-                 * it meanwhile:
-                 */
+                spin_lock_irqsave(&ds_lock, irq);
+
                 if (*p_context) {
                         kfree(context->ds);
                         kfree(context);
-                        return *p_context;
-                }
 
-                *p_context = context;
+                        context = *p_context;
+                } else {
+                        *p_context = context;
 
-                context->this = p_context;
-                context->task = task;
+                        context->this = p_context;
+                        context->task = task;
 
-                if (task)
-                        set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
+                        if (task)
+                                set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
 
-                if (!task || (task == current))
-                        wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
+                        if (!task || (task == current))
+                                wrmsrl(MSR_IA32_DS_AREA,
+                                       (unsigned long)context->ds);
+                }
 
-                get_tracer(task);
+                spin_unlock_irqrestore(&ds_lock, irq);
         }
 
         context->count++;

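The rewritten ds_alloc_context() is the standard "optimistic allocate, then check under the lock" pattern. The old code dropped and re-took ds_lock around kzalloc (a sleeping allocation must not happen under a spinlock), which opened the very race it then had to detect; the new code allocates with no lock held, takes the lock once, and either installs its context or frees it in favor of the one a racing CPU installed. A userspace model (pthread mutex standing in for the spinlock; names hypothetical):

    #include <pthread.h>
    #include <stdlib.h>

    struct ctx { int count; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct ctx *global_ctx;   /* models *p_context */

    static struct ctx *alloc_ctx(void)
    {
            struct ctx *ctx;

            /* allocate with no lock held -- the allocation may block,
             * which is exactly what must never happen under a spinlock */
            ctx = calloc(1, sizeof(*ctx));
            if (!ctx)
                    return NULL;

            pthread_mutex_lock(&lock);
            if (global_ctx) {
                    /* another thread beat us: drop ours, adopt theirs */
                    free(ctx);
                    ctx = global_ctx;
            } else {
                    /* we won the race: publish our context */
                    global_ctx = ctx;
            }
            ctx->count++;
            pthread_mutex_unlock(&lock);

            return ctx;
    }
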
@@ -288,10 +278,12 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
  */
 static inline void ds_put_context(struct ds_context *context)
 {
+        unsigned long irq;
+
         if (!context)
                 return;
 
-        spin_lock(&ds_lock);
+        spin_lock_irqsave(&ds_lock, irq);
 
         if (--context->count)
                 goto out;

@@ -313,7 +305,7 @@ static inline void ds_put_context(struct ds_context *context)
         kfree(context->ds);
         kfree(context);
 out:
-        spin_unlock(&ds_lock);
+        spin_unlock_irqrestore(&ds_lock, irq);
 }
 
@@ -384,6 +376,7 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
         struct ds_context *context;
         unsigned long buffer, adj;
         const unsigned long alignment = (1 << 3);
+        unsigned long irq;
         int error = 0;
 
         if (!ds_cfg.sizeof_ds)

@@ -398,26 +391,27 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
                 return -EOPNOTSUPP;
 
 
-        spin_lock(&ds_lock);
-
-        error = -ENOMEM;
         context = ds_alloc_context(task);
         if (!context)
-                goto out_unlock;
+                return -ENOMEM;
+
+        spin_lock_irqsave(&ds_lock, irq);
 
         error = -EPERM;
         if (!check_tracer(task))
                 goto out_unlock;
 
+        get_tracer(task);
+
         error = -EALREADY;
         if (context->owner[qual] == current)
-                goto out_unlock;
+                goto out_put_tracer;
         error = -EPERM;
         if (context->owner[qual] != NULL)
-                goto out_unlock;
+                goto out_put_tracer;
         context->owner[qual] = current;
 
-        spin_unlock(&ds_lock);
+        spin_unlock_irqrestore(&ds_lock, irq);
 
 
         error = -ENOMEM;

@@ -465,10 +459,17 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
 out_release:
         context->owner[qual] = NULL;
         ds_put_context(context);
+        put_tracer(task);
+        return error;
+
+out_put_tracer:
+        spin_unlock_irqrestore(&ds_lock, irq);
+        ds_put_context(context);
+        put_tracer(task);
         return error;
 
 out_unlock:
-        spin_unlock(&ds_lock);
+        spin_unlock_irqrestore(&ds_lock, irq);
         ds_put_context(context);
         return error;
 }

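ds_request() now exits through three ladders (out_release, out_put_tracer, out_unlock), each undoing exactly the state acquired up to the failure point. This is the usual kernel goto-unwind idiom; a minimal self-contained illustration follows (helper names hypothetical; note the classic variant below lets the labels fall through in reverse acquisition order, whereas each ladder in the hunk above returns on its own):

    #include <stdio.h>
    #include <stdlib.h>

    /* stand-ins for the resources ds_request() juggles */
    static void lock(void)       { }
    static void unlock(void)     { }
    static int  get_tracer(void) { return 0; }   /* 0 == success */
    static void put_tracer(void) { }

    static int request(void)
    {
            int error = -1;
            void *buf;

            lock();

            if (get_tracer())
                    goto out_unlock;        /* undo: lock only */

            buf = malloc(64);
            if (!buf)
                    goto out_put_tracer;    /* undo: tracer, then lock */

            unlock();
            free(buf);
            return 0;

            /* labels fall through, releasing in reverse order */
    out_put_tracer:
            put_tracer();
    out_unlock:
            unlock();
            return error;
    }

    int main(void)
    {
            printf("request() = %d\n", request());
            return 0;
    }
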
@@ -818,13 +819,21 @@ static const struct ds_configuration ds_cfg_var = {
         .sizeof_ds = sizeof(long) * 12,
         .sizeof_field = sizeof(long),
         .sizeof_rec[ds_bts] = sizeof(long) * 3,
+#ifdef __i386__
         .sizeof_rec[ds_pebs] = sizeof(long) * 10
+#else
+        .sizeof_rec[ds_pebs] = sizeof(long) * 18
+#endif
 };
 static const struct ds_configuration ds_cfg_64 = {
         .sizeof_ds = 8 * 12,
         .sizeof_field = 8,
         .sizeof_rec[ds_bts] = 8 * 3,
+#ifdef __i386__
         .sizeof_rec[ds_pebs] = 8 * 10
+#else
+        .sizeof_rec[ds_pebs] = 8 * 18
+#endif
 };
 
 static inline void

@@ -878,4 +887,3 @@ void ds_free(struct ds_context *context)
         while (leftovers--)
                 ds_put_context(context);
 }
-#endif /* CONFIG_X86_DS */

@@ -58,7 +58,7 @@ void __cpuinit mxcsr_feature_mask_init(void)
         stts();
 }
 
-void __init init_thread_xstate(void)
+void __cpuinit init_thread_xstate(void)
 {
         if (!HAVE_HWFP) {
                 xstate_size = sizeof(struct i387_soft_struct);

(one file's diff omitted here: the viewer reported it as too large to display)

@@ -118,6 +118,9 @@ int show_interrupts(struct seq_file *p, void *v)
         }
 
         desc = irq_to_desc(i);
+        if (!desc)
+                return 0;
+
         spin_lock_irqsave(&desc->lock, flags);
 #ifndef CONFIG_SMP
         any_count = kstat_irqs(i);

@@ -242,6 +242,8 @@ void fixup_irqs(cpumask_t map)
         for_each_irq_desc(irq, desc) {
                 cpumask_t mask;
 
+                if (!desc)
+                        continue;
                 if (irq == 2)
                         continue;
 
@@ -94,6 +94,8 @@ void fixup_irqs(cpumask_t map)
                 int break_affinity = 0;
                 int set_affinity = 1;
 
+                if (!desc)
+                        continue;
                 if (irq == 2)
                         continue;
 
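Both fixup_irqs() variants and show_interrupts() gain "if (!desc)" guards for the same reason: with the sparse-IRQ work in this merge, irq_to_desc() can return NULL for an unallocated IRQ number instead of indexing into a dense static array. The guard pattern, as a standalone model (all names stand-ins for the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    struct irq_desc { int status; };

    #define NR_IRQS 32
    static struct irq_desc *irq_descs[NR_IRQS]; /* sparse: may be NULL */

    /* models irq_to_desc(): may return NULL once descriptors are
     * allocated on demand rather than kept in a dense static array */
    static struct irq_desc *irq_to_desc(unsigned int irq)
    {
            return irq < NR_IRQS ? irq_descs[irq] : NULL;
    }

    static void walk_irqs(void)
    {
            unsigned int irq;
            struct irq_desc *desc;

            for (irq = 0; irq < NR_IRQS; irq++) {
                    desc = irq_to_desc(irq);
                    if (!desc)
                            continue;   /* hole in the sparse space */
                    printf("irq %u status %d\n", irq, desc->status);
            }
    }
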
@@ -68,8 +68,7 @@ void __init init_ISA_irqs (void)
         /*
          * 16 old-style INTA-cycle interrupts:
          */
-        for (i = 0; i < 16; i++) {
-                /* first time call this irq_desc */
+        for (i = 0; i < NR_IRQS_LEGACY; i++) {
                 struct irq_desc *desc = irq_to_desc(i);
 
                 desc->status = IRQ_DISABLED;

@@ -142,8 +142,7 @@ void __init init_ISA_irqs(void)
         init_bsp_APIC();
         init_8259A(0);
 
-        for (i = 0; i < 16; i++) {
-                /* first time call this irq_desc */
+        for (i = 0; i < NR_IRQS_LEGACY; i++) {
                 struct irq_desc *desc = irq_to_desc(i);
 
                 desc->status = IRQ_DISABLED;

@@ -128,7 +128,7 @@ static int kvm_register_clock(char *txt)
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
-static void __devinit kvm_setup_secondary_clock(void)
+static void __cpuinit kvm_setup_secondary_clock(void)
 {
         /*
          * Now that the first cpu already had this clocksource initialized,

@@ -272,13 +272,18 @@ static struct attribute_group mc_attr_group = {
         .name = "microcode",
 };
 
-static void microcode_fini_cpu(int cpu)
+static void __microcode_fini_cpu(int cpu)
 {
         struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
-        mutex_lock(&microcode_mutex);
         microcode_ops->microcode_fini_cpu(cpu);
         uci->valid = 0;
+}
+
+static void microcode_fini_cpu(int cpu)
+{
+        mutex_lock(&microcode_mutex);
+        __microcode_fini_cpu(cpu);
         mutex_unlock(&microcode_mutex);
 }
 
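This hunk splits the function into a bare __microcode_fini_cpu() that assumes the caller already holds microcode_mutex, plus a microcode_fini_cpu() wrapper that takes the mutex itself; microcode_resume_cpu(), which runs with the mutex held, can then call the double-underscore variant without self-deadlocking on a non-recursive lock. The shape of the idiom, as a userspace sketch (the kernel spells the bare variant with a "__" prefix; names here are stand-ins):

    #include <pthread.h>

    static pthread_mutex_t microcode_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int ucode_valid[64];  /* models ucode_cpu_info[cpu].valid */

    /* bare variant: caller must hold microcode_mutex */
    static void fini_cpu_locked(int cpu)
    {
            ucode_valid[cpu] = 0;
    }

    /* public variant: takes the lock, then delegates */
    static void fini_cpu(int cpu)
    {
            pthread_mutex_lock(&microcode_mutex);
            fini_cpu_locked(cpu);
            pthread_mutex_unlock(&microcode_mutex);
    }

    /* a caller that already holds the mutex uses the bare variant,
     * avoiding the deadlock a second, recursive lock would cause */
    static int resume_cpu(int cpu)
    {
            pthread_mutex_lock(&microcode_mutex);
            fini_cpu_locked(cpu);    /* NOT fini_cpu(cpu) */
            pthread_mutex_unlock(&microcode_mutex);
            return 0;
    }
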
@@ -306,12 +311,16 @@ static int microcode_resume_cpu(int cpu)
          * to this cpu (a bit of paranoia):
          */
         if (microcode_ops->collect_cpu_info(cpu, &nsig)) {
-                microcode_fini_cpu(cpu);
+                __microcode_fini_cpu(cpu);
+                printk(KERN_ERR "failed to collect_cpu_info for resuming cpu #%d\n",
+                        cpu);
                 return -1;
         }
 
-        if (memcmp(&nsig, &uci->cpu_sig, sizeof(nsig))) {
-                microcode_fini_cpu(cpu);
+        if ((nsig.sig != uci->cpu_sig.sig) || (nsig.pf != uci->cpu_sig.pf)) {
+                __microcode_fini_cpu(cpu);
+                printk(KERN_ERR "cached ucode doesn't match the resuming cpu #%d\n",
+                        cpu);
                 /* Should we look for a new ucode here? */
                 return 1;
         }

@@ -155,6 +155,7 @@ static DEFINE_SPINLOCK(microcode_update_lock);
 static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 {
         struct cpuinfo_x86 *c = &cpu_data(cpu_num);
+        unsigned long flags;
         unsigned int val[2];
 
         memset(csig, 0, sizeof(*csig));

@@ -174,11 +175,16 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
                 csig->pf = 1 << ((val[1] >> 18) & 7);
         }
 
+        /* serialize access to the physical write to MSR 0x79 */
+        spin_lock_irqsave(&microcode_update_lock, flags);
+
         wrmsr(MSR_IA32_UCODE_REV, 0, 0);
         /* see notes above for revision 1.07. Apparent chip bug */
         sync_core();
         /* get the current revision from MSR 0x8B */
         rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev);
+        spin_unlock_irqrestore(&microcode_update_lock, flags);
 
         pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
                 csig->sig, csig->pf, csig->rev);

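The lock here brackets the whole write/serialize/read sequence so the revision read cannot interleave with a concurrent update on the other logical CPU of a hyperthreaded core (MSR 0x79 is physical, shared per core). The calls in the hunk are the real kernel APIs; a hedged fragment isolating just that sequence (lock name hypothetical, header paths approximate for that kernel era):

    #include <linux/spinlock.h>
    #include <asm/msr.h>
    #include <asm/processor.h>

    static DEFINE_SPINLOCK(update_lock_example);

    static u32 read_ucode_rev(void)
    {
            unsigned long flags;
            u32 dummy, rev;

            /* the three steps must be atomic with respect to a
             * sibling thread updating microcode on the same core */
            spin_lock_irqsave(&update_lock_example, flags);
            wrmsr(MSR_IA32_UCODE_REV, 0, 0);
            sync_core();                    /* flush before reading back */
            rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
            spin_unlock_irqrestore(&update_lock_example, flags);

            return rev;
    }
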
@@ -604,6 +604,9 @@ static void __init __get_smp_config(unsigned int early)
                 printk(KERN_INFO "Using ACPI for processor (LAPIC) "
                        "configuration information\n");
 
+        if (!mpf)
+                return;
+
         printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
                mpf->mpf_specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)

@@ -7,7 +7,8 @@
 
 #include <asm/paravirt.h>
 
-static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static inline void
+default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
         __raw_spin_lock(lock);
 }

@@ -1567,7 +1567,7 @@ static int __init calgary_parse_options(char *p)
                         ++p;
                         if (*p == '\0')
                                 break;
-                        bridge = simple_strtol(p, &endp, 0);
+                        bridge = simple_strtoul(p, &endp, 0);
                         if (p == endp)
                                 break;
 
@@ -123,6 +123,8 @@ static void free_iommu(unsigned long offset, int size)
 
         spin_lock_irqsave(&iommu_bitmap_lock, flags);
         iommu_area_free(iommu_gart_bitmap, offset, size);
+        if (offset >= next_bit)
+                next_bit = offset + size;
         spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
 
@@ -743,10 +745,8 @@ void __init gart_iommu_init(void)
         unsigned long scratch;
         long i;
 
-        if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
-                printk(KERN_INFO "PCI-GART: No AMD GART found.\n");
+        if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
                 return;
-        }
 
 #ifndef CONFIG_AGP_AMD64
         no_agp = 1;

@@ -794,6 +794,9 @@ void __init setup_arch(char **cmdline_p)
         printk(KERN_INFO "Command line: %s\n", boot_command_line);
 #endif
 
+        /* VMI may relocate the fixmap; do this before touching ioremap area */
+        vmi_init();
+
         early_cpu_init();
         early_ioremap_init();
 
@@ -880,13 +883,8 @@ void __init setup_arch(char **cmdline_p)
         check_efer();
 #endif
 
-#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
-        /*
-         * Must be before kernel pagetables are setup
-         * or fixmap area is touched.
-         */
-        vmi_init();
-#endif
+        /* Must be before kernel pagetables are setup */
+        vmi_activate();
 
         /* after early param, so could get panic from serial */
         reserve_early_setup_data();

@@ -1082,7 +1080,7 @@ void __init setup_arch(char **cmdline_p)
         ioapic_init_mappings();
 
         /* need to wait for io_apic is mapped */
-        nr_irqs = probe_nr_irqs();
+        probe_nr_irqs_gsi();
 
         kvm_guest_init();
 
@@ -294,9 +294,7 @@ static void __cpuinit start_secondary(void *unused)
          * fragile that we want to limit the things done here to the
          * most necessary things.
          */
-#ifdef CONFIG_VMI
         vmi_bringup();
-#endif
         cpu_init();
         preempt_disable();
         smp_callin();

@@ -960,8 +960,6 @@ static inline int __init activate_vmi(void)
 
 void __init vmi_init(void)
 {
-        unsigned long flags;
-
         if (!vmi_rom)
                 probe_vmi_rom();
         else

@@ -973,13 +971,21 @@ void __init vmi_init(void)
 
         reserve_top_address(-vmi_rom->virtual_top);
 
-        local_irq_save(flags);
-        activate_vmi();
-
 #ifdef CONFIG_X86_IO_APIC
         /* This is virtual hardware; timer routing is wired correctly */
         no_timer_check = 1;
 #endif
+}
+
+void vmi_activate(void)
+{
+        unsigned long flags;
+
+        if (!vmi_rom)
+                return;
+
+        local_irq_save(flags);
+        activate_vmi();
+        local_irq_restore(flags & X86_EFLAGS_IF);
 }
 
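The setup_arch and vmi_32.c hunks together split VMI setup into two phases: vmi_init() runs early (before the fixmap/ioremap area is touched) to probe the ROM and reserve the top of the address space, while the new vmi_activate() installs the hooks later, just before kernel pagetables are set up, and is a harmless no-op when no ROM was found. A generic sketch of that split-phase pattern (all names hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    static bool rom_found;

    /* phase 1: discovery and address-space reservation; must run
     * before anything touches the region being reserved */
    static void subsys_init(void)
    {
            rom_found = true;       /* models probe_vmi_rom() succeeding */
            printf("reserved address range\n"); /* models reserve_top_address() */
    }

    /* phase 2: activation; runs later, and is a no-op when
     * phase 1 found nothing to activate */
    static void subsys_activate(void)
    {
            if (!rom_found)
                    return;
            printf("hooks installed\n");
    }

    int main(void)
    {
            subsys_init();          /* early in boot */
            /* ... other early setup ... */
            subsys_activate();      /* just before pagetables */
            return 0;
    }
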
@@ -310,7 +310,7 @@ static void __init setup_xstate_init(void)
 /*
  * Enable and initialize the xsave feature.
  */
-void __init xsave_cntxt_init(void)
+void __ref xsave_cntxt_init(void)
 {
         unsigned int eax, ebx, ecx, edx;