Merge commit 'v2.6.35-rc3' into perf/core

Merge reason: Go from -rc1 base to -rc3 base, merge in fixes.
Ingo Molnar
2010-06-18 10:53:12 +02:00
1019 changed files with 83316 additions and 7749 deletions

@@ -1,4 +1,4 @@
-	.section .text.page_aligned
+	.section .text..page_aligned
 #include <linux/linkage.h>
 #include <asm/segment.h>
 #include <asm/page_types.h>
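The rename above (mirrored by the vmlinux.lds.S hunks at the end of this series) moves the kernel's special-purpose section from .text.page_aligned to .text..page_aligned. The double dot keeps kernel-defined section names out of the namespace the compiler itself uses: with -ffunction-sections, GCC emits a .text.<function-name> section per function, so a single-dot kernel section name can collide with an ordinary function name. A minimal standalone illustration (plain userspace C with a made-up section name, not kernel code):

/*
 * Standalone demo: place a function in a kernel-style double-dot section.
 * The section name is invented for this example; build with
 *   gcc -Wall -ffunction-sections demo.c && ./a.out
 */
#include <stdio.h>

__attribute__((section(".text..demo_page_aligned")))
static void demo(void)
{
	puts("running from .text..demo_page_aligned");
}

int main(void)
{
	demo();		/* still an ordinary call, only its section differs */
	return 0;
}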

@@ -1487,6 +1487,7 @@ static int __attach_device(struct device *dev,
 			   struct protection_domain *domain)
 {
 	struct iommu_dev_data *dev_data, *alias_data;
+	int ret;
 
 	dev_data = get_dev_data(dev);
 	alias_data = get_dev_data(dev_data->alias);
@@ -1498,13 +1499,14 @@ static int __attach_device(struct device *dev,
 	spin_lock(&domain->lock);
 
 	/* Some sanity checks */
+	ret = -EBUSY;
 	if (alias_data->domain != NULL &&
 	    alias_data->domain != domain)
-		return -EBUSY;
+		goto out_unlock;
 
 	if (dev_data->domain != NULL &&
 	    dev_data->domain != domain)
-		return -EBUSY;
+		goto out_unlock;
 
 	/* Do real assignment */
 	if (dev_data->alias != dev) {
@@ -1520,10 +1522,14 @@ static int __attach_device(struct device *dev,
 
 	atomic_inc(&dev_data->bind);
 
+	ret = 0;
+
+out_unlock:
+
 	/* ready */
 	spin_unlock(&domain->lock);
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -2324,10 +2330,6 @@ int __init amd_iommu_init_dma_ops(void)
 
 	iommu_detected = 1;
 	swiotlb = 0;
-#ifdef CONFIG_GART_IOMMU
-	gart_iommu_aperture_disabled = 1;
-	gart_iommu_aperture = 0;
-#endif
 
 	/* Make the driver finally visible to the drivers */
 	dma_ops = &amd_iommu_dma_ops;
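The three __attach_device() hunks above fix an error path that returned -EBUSY while domain->lock was still held: the sanity checks now jump to a single out_unlock label, and the function returns the status collected in ret only after the spin_unlock(). A runnable userspace analog of the same pattern (invented names, a pthread mutex standing in for the spinlock):

/*
 * Userspace sketch of the "one unlock exit" pattern; names are invented.
 * Every failure taken after the lock is acquired routes through
 * out_unlock instead of returning early with the mutex held.
 */
#include <pthread.h>
#include <stdio.h>

struct domain {
	pthread_mutex_t lock;
	int owner;			/* 0 = not attached to anything */
};

static int attach_device(struct domain *dom, int dev_id)
{
	int ret;

	pthread_mutex_lock(&dom->lock);

	ret = -1;			/* assume the EBUSY-like failure */
	if (dom->owner && dom->owner != dev_id)
		goto out_unlock;	/* an early return here would leak the lock */

	dom->owner = dev_id;		/* the real assignment */
	ret = 0;

out_unlock:
	pthread_mutex_unlock(&dom->lock);
	return ret;
}

int main(void)
{
	struct domain dom = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("first attach:  %d\n", attach_device(&dom, 1));	/* 0, succeeds */
	printf("second attach: %d\n", attach_device(&dom, 2));	/* -1, busy */
	return 0;
}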

@@ -287,8 +287,12 @@ static u8 * __init iommu_map_mmio_space(u64 address)
 {
 	u8 *ret;
 
-	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
+	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
+		pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
+			address);
+		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
 		return NULL;
+	}
 
 	ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
 	if (ret != NULL)
@@ -1314,7 +1318,7 @@ static int __init amd_iommu_init(void)
 	ret = amd_iommu_init_dma_ops();
 	if (ret)
-		goto free;
+		goto free_disable;
 
 	amd_iommu_init_api();
@@ -1332,9 +1336,10 @@ static int __init amd_iommu_init(void)
 out:
 	return ret;
 
-free:
+free_disable:
 	disable_iommus();
 
+free:
 	amd_iommu_uninit_devices();
 
 	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
@@ -1353,6 +1358,15 @@ free:
 	free_unity_maps();
 
+#ifdef CONFIG_GART_IOMMU
+	/*
+	 * We failed to initialize the AMD IOMMU - try fallback to GART
+	 * if possible.
+	 */
+	gart_iommu_init();
+
+#endif
+
 	goto out;
 }
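The amd_iommu_init() hunks above split the old free label in two: a failure in amd_iommu_init_dma_ops(), which happens after the IOMMUs were enabled, now jumps to free_disable, which runs disable_iommus() and falls through into the common free cleanup, and the failure path retries with GART when CONFIG_GART_IOMMU is available. A compact standalone sketch of the stacked-label idiom (step names invented):

/*
 * Stacked cleanup labels, step names invented: jump to the label that
 * matches how far initialization got; the more specific label falls
 * through into the common cleanup below it.
 */
#include <stdio.h>

static int hw_enabled, tables_allocated;

static int init_example(int fail_at)
{
	int ret = -1;

	tables_allocated = 1;			/* step 1: allocate state */
	if (fail_at == 1)
		goto free;			/* hardware never touched */

	hw_enabled = 1;				/* step 2: enable hardware */
	if (fail_at == 2)
		goto free_disable;		/* must undo step 2 first */

	return 0;

free_disable:
	hw_enabled = 0;				/* specific cleanup ... */
	/* fall through */
free:
	tables_allocated = 0;			/* ... then the common cleanup */
	return ret;
}

int main(void)
{
	int ret = init_example(2);

	printf("ret=%d hw_enabled=%d tables_allocated=%d\n",
	       ret, hw_enabled, tables_allocated);
	return 0;
}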

@@ -36,6 +36,7 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/debugfs.h>
+#include <linux/edac_mce.h>
 
 #include <asm/processor.h>
 #include <asm/hw_irq.h>
@@ -168,6 +169,15 @@ void mce_log(struct mce *mce)
 	for (;;) {
 		entry = rcu_dereference_check_mce(mcelog.next);
 		for (;;) {
+			/*
+			 * If edac_mce is enabled, it will check the error type
+			 * and will process it, if it is a known error.
+			 * Otherwise, the error will be sent through mcelog
+			 * interface
+			 */
+			if (edac_mce_parse(mce))
+				return;
+
 			/*
 			 * When the buffer fills up discard new entries.
 			 * Assume that the earlier errors are the more
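The mce.c hunks above give the edac_mce layer first look at every logged machine check: when edac_mce_parse() recognizes the error and handles it, mce_log() returns at once and the record never enters the mcelog ring buffer. A self-contained sketch of that consume-or-fall-through hook (names invented):

/*
 * Consume-or-fall-through hook, names invented: a registered parser may
 * claim a record; only unclaimed records reach the default log path.
 */
#include <stdbool.h>
#include <stdio.h>

struct record {
	int code;
};

/* stand-in for edac_mce_parse(): true means "handled, stop here" */
static bool (*parser_hook)(const struct record *rec);

static bool demo_parser(const struct record *rec)
{
	return rec->code == 42;		/* pretend code 42 is a known error */
}

static void log_record(const struct record *rec)
{
	if (parser_hook && parser_hook(rec))
		return;			/* consumed by the hook */
	printf("default log path: code=%d\n", rec->code);
}

int main(void)
{
	struct record known = { 42 };
	struct record other = { 7 };

	parser_hook = demo_parser;
	log_record(&known);		/* silently consumed by demo_parser */
	log_record(&other);		/* falls through to the default log */
	return 0;
}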

@@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task);
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */

@@ -21,12 +21,6 @@
 #include <asm/cpu.h>
 #include <asm/stackprotector.h>
 
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-# define DBG(fmt, ...) pr_dbg(fmt, ##__VA_ARGS__)
-#else
-# define DBG(fmt, ...) do { if (0) pr_dbg(fmt, ##__VA_ARGS__); } while (0)
-#endif
-
 DEFINE_PER_CPU(int, cpu_number);
 EXPORT_PER_CPU_SYMBOL(cpu_number);
@@ -247,7 +241,7 @@ void __init setup_per_cpu_areas(void)
 #endif
 #endif
 	/*
-	 * Up to this point, the boot CPU has been using .data.init
+	 * Up to this point, the boot CPU has been using .init.data
 	 * area. Reload any changed state for the boot CPU.
 	 */
 	if (cpu == boot_cpu_id)

@@ -686,7 +686,7 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
 static void __cpuinit announce_cpu(int cpu, int apicid)
 {
 	static int current_node = -1;
-	int node = cpu_to_node(cpu);
+	int node = early_cpu_to_node(cpu);
 
 	if (system_state == SYSTEM_BOOTING) {
 		if (node != current_node) {

@@ -97,7 +97,7 @@ SECTIONS
 		HEAD_TEXT
 #ifdef CONFIG_X86_32
 		. = ALIGN(PAGE_SIZE);
-		*(.text.page_aligned)
+		*(.text..page_aligned)
 #endif
 		. = ALIGN(8);
 		_stext = .;
@@ -305,7 +305,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
 		__bss_start = .;
-		*(.bss.page_aligned)
+		*(.bss..page_aligned)
 		*(.bss)
 		. = ALIGN(4);
 		__bss_stop = .;