Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	drivers/net/sfc/sfe4001.c
	drivers/net/wireless/libertas/cmd.c
	drivers/staging/Kconfig
	drivers/staging/Makefile
	drivers/staging/rtl8187se/Kconfig
	drivers/staging/rtl8192e/Kconfig
@@ -57,5 +57,8 @@ SECTIONS
*(.note*)
}

+/*
+ * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
+ */
. = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!");
}
@@ -1220,6 +1220,8 @@ static void __detach_device(struct protection_domain *domain, u16 devid)
amd_iommu_dev_table[devid].data[1] = 0;
amd_iommu_dev_table[devid].data[2] = 0;

+amd_iommu_apply_erratum_63(devid);
+
/* decrease reference counter */
domain->dev_cnt -= 1;
@@ -240,7 +240,7 @@ static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

-static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
+static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
u32 ctrl;
@@ -519,6 +519,26 @@ static void set_dev_entry_bit(u16 devid, u8 bit)
amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
}

+static int get_dev_entry_bit(u16 devid, u8 bit)
+{
+int i = (bit >> 5) & 0x07;
+int _bit = bit & 0x1f;
+
+return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
+}
+
+
+void amd_iommu_apply_erratum_63(u16 devid)
+{
+int sysmgt;
+
+sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
+(get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
+
+if (sysmgt == 0x01)
+set_dev_entry_bit(devid, DEV_ENTRY_IW);
+}
+
/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
@@ -547,6 +567,8 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
if (flags & ACPI_DEVFLAG_LINT1)
set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

+amd_iommu_apply_erratum_63(devid);
+
set_iommu_for_device(iommu, devid);
}
@@ -352,14 +352,14 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)

for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
alias.v = uv_read_local_mmr(redir_addrs[i].alias);
-if (alias.s.base == 0) {
+if (alias.s.enable && alias.s.base == 0) {
*size = (1UL << alias.s.m_alias);
redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
return;
}
}
-BUG();
+*base = *size = 0;
}

enum map_type {map_wb, map_uc};
@@ -619,12 +619,12 @@ void __init uv_system_init(void)
uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
uv_cpu_hub_info(cpu)->m_val = m_val;
-uv_cpu_hub_info(cpu)->n_val = m_val;
+uv_cpu_hub_info(cpu)->n_val = n_val;
uv_cpu_hub_info(cpu)->numa_blade_id = blade;
uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
uv_cpu_hub_info(cpu)->pnode = pnode;
uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
-uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
+uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
@@ -526,15 +526,21 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
-/* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+/* Intel Xeon Processor 7100 Series Specification Update
+ * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
 * AL30: A Machine Check Exception (MCE) Occurring during an
 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
-* Both Processor Cores to Lock Up when HT is enabled*/
+* Both Processor Cores to Lock Up. */
if (c->x86_vendor == X86_VENDOR_INTEL) {
if ((c->x86 == 15) &&
(c->x86_model == 6) &&
-(c->x86_mask == 8) && smt_capable())
+(c->x86_mask == 8)) {
+printk(KERN_INFO "acpi-cpufreq: Intel(R) "
+"Xeon(R) 7100 Errata AL30, processors may "
+"lock up on frequency changes: disabling "
+"acpi-cpufreq.\n");
return -ENODEV;
+}
}
return 0;
}
@@ -549,13 +555,18 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
struct acpi_processor_performance *perf;
+#ifdef CONFIG_SMP
+static int blacklisted;
+#endif

dprintk("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
-result = acpi_cpufreq_blacklist(c);
-if (result)
-return result;
+if (blacklisted)
+return blacklisted;
+blacklisted = acpi_cpufreq_blacklist(c);
+if (blacklisted)
+return blacklisted;
#endif

data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
@@ -813,7 +813,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr));
break;
case 1 ... 15:
-longhaul_version = TYPE_LONGHAUL_V1;
+longhaul_version = TYPE_LONGHAUL_V2;
if (c->x86_mask < 8) {
cpu_model = CPU_SAMUEL2;
cpuname = "C3 'Samuel 2' [C5B]";
@@ -1022,7 +1022,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
 * set it to 1 to avoid problems in the future.
 * For all others it's a BIOS bug.
 */
-if (!boot_cpu_data.x86 == 0x11)
+if (boot_cpu_data.x86 != 0x11)
printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
"latency\n");
max_latency = 1;
@@ -232,28 +232,23 @@ static unsigned int speedstep_detect_chipset(void)
return 0;
}

-struct get_freq_data {
-unsigned int speed;
-unsigned int processor;
-};
-
-static void get_freq_data(void *_data)
+static void get_freq_data(void *_speed)
{
-struct get_freq_data *data = _data;
+unsigned int *speed = _speed;

-data->speed = speedstep_get_frequency(data->processor);
+*speed = speedstep_get_frequency(speedstep_processor);
}

static unsigned int speedstep_get(unsigned int cpu)
{
-struct get_freq_data data = { .processor = cpu };
+unsigned int speed;

/* You're supposed to ensure CPU is online. */
-if (smp_call_function_single(cpu, get_freq_data, &data, 1) != 0)
+if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
BUG();

-dprintk("detected %u kHz as current frequency\n", data.speed);
-return data.speed;
+dprintk("detected %u kHz as current frequency\n", speed);
+return speed;
}

/**
@@ -85,6 +85,18 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
static DEFINE_PER_CPU(struct mce, mces_seen);
static int cpu_missing;

+static void default_decode_mce(struct mce *m)
+{
+pr_emerg("No human readable MCE decoding support on this CPU type.\n");
+pr_emerg("Run the message through 'mcelog --ascii' to decode.\n");
+}
+
+/*
+ * CPU/chipset specific EDAC code can register a callback here to print
+ * MCE errors in a human-readable form:
+ */
+void (*x86_mce_decode_callback)(struct mce *m) = default_decode_mce;
+EXPORT_SYMBOL(x86_mce_decode_callback);
+
/* MCA banks polled by the period polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
@@ -165,46 +177,46 @@ void mce_log(struct mce *mce)
set_bit(0, &mce_need_notify);
}

-void __weak decode_mce(struct mce *m)
-{
-return;
-}
-
static void print_mce(struct mce *m)
{
-printk(KERN_EMERG
-"CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
+pr_emerg("CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
m->extcpu, m->mcgstatus, m->bank, m->status);

if (m->ip) {
-printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
-!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
-m->cs, m->ip);
+pr_emerg("RIP%s %02x:<%016Lx> ",
+!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
+m->cs, m->ip);

if (m->cs == __KERNEL_CS)
print_symbol("{%s}", m->ip);
-printk(KERN_CONT "\n");
+pr_cont("\n");
}
-printk(KERN_EMERG "TSC %llx ", m->tsc);
-if (m->addr)
-printk(KERN_CONT "ADDR %llx ", m->addr);
-if (m->misc)
-printk(KERN_CONT "MISC %llx ", m->misc);
-printk(KERN_CONT "\n");
-printk(KERN_EMERG "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
-m->cpuvendor, m->cpuid, m->time, m->socketid,
-m->apicid);
-
-decode_mce(m);
+pr_emerg("TSC %llx ", m->tsc);
+if (m->addr)
+pr_cont("ADDR %llx ", m->addr);
+if (m->misc)
+pr_cont("MISC %llx ", m->misc);
+
+pr_cont("\n");
+pr_emerg("PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
+m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid);
+
+/*
+ * Print out human-readable details about the MCE error,
+ * (if the CPU has an implementation for that):
+ */
+x86_mce_decode_callback(m);
}

static void print_mce_head(void)
{
-printk(KERN_EMERG "\nHARDWARE ERROR\n");
+pr_emerg("\nHARDWARE ERROR\n");
}

static void print_mce_tail(void)
{
-printk(KERN_EMERG "This is not a software problem!\n"
-"Run through mcelog --ascii to decode and contact your hardware vendor\n");
+pr_emerg("This is not a software problem!\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */
@@ -218,6 +230,7 @@ static atomic_t mce_fake_paniced;
static void wait_for_panic(void)
{
long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

+preempt_disable();
local_irq_enable();
while (timeout-- > 0)
@@ -285,6 +298,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
static int msr_to_offset(u32 msr)
{
unsigned bank = __get_cpu_var(injectm.bank);
+
if (msr == rip_msr)
return offsetof(struct mce, ip);
if (msr == MSR_IA32_MCx_STATUS(bank))
@@ -1200,7 +1214,8 @@ static int __cpuinit mce_cap_init(void)
rdmsrl(MSR_IA32_MCG_CAP, cap);

b = cap & MCG_BANKCNT_MASK;
-printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);
+if (!banks)
+printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);

if (b > MAX_NR_BANKS) {
printk(KERN_WARNING
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
+#include <linux/sched.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/msr.h>
@@ -846,7 +846,7 @@ int __init mtrr_cleanup(unsigned address_bits)
sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);

range_sums = sum_ranges(range, nr_range);
-printk(KERN_INFO "total RAM coverred: %ldM\n",
+printk(KERN_INFO "total RAM covered: %ldM\n",
range_sums >> (20 - PAGE_SHIFT));

if (mtrr_chunk_size && mtrr_gran_size) {
@@ -96,17 +96,24 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
unsigned long long base, size;
char *ptr;
char line[LINE_SIZE];
+int length;
size_t linelen;

if (!capable(CAP_SYS_ADMIN))
return -EPERM;
-if (!len)
-return -EINVAL;
+
memset(line, 0, LINE_SIZE);
-if (len > LINE_SIZE)
-len = LINE_SIZE;
-if (copy_from_user(line, buf, len - 1))
+
+length = len;
+length--;
+
+if (length > LINE_SIZE - 1)
+length = LINE_SIZE - 1;
+
+if (length < 0)
+return -EINVAL;
+
+if (copy_from_user(line, buf, length))
return -EFAULT;

linelen = strlen(line);
@@ -16,6 +16,22 @@ static void *kdump_buf_page;
/* Stores the physical address of elf header of crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;

+static inline bool is_crashed_pfn_valid(unsigned long pfn)
+{
+#ifndef CONFIG_X86_PAE
+/*
+ * non-PAE kdump kernel executed from a PAE one will crop high pte
+ * bits and poke unwanted space counting again from address 0, we
+ * don't want that. pte must fit into unsigned long. In fact the
+ * test checks high 12 bits for being zero (pfn will be shifted left
+ * by PAGE_SHIFT).
+ */
+return pte_pfn(pfn_pte(pfn, __pgprot(0))) == pfn;
+#else
+return true;
+#endif
+}
+
/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
@@ -41,6 +57,9 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!csize)
return 0;

+if (!is_crashed_pfn_valid(pfn))
+return -EFAULT;
+
vaddr = kmap_atomic_pfn(pfn, KM_PTE0);

if (!userbuf) {
@@ -1378,8 +1378,8 @@ static unsigned long ram_alignment(resource_size_t pos)
if (mb < 16)
return 1024*1024;

-/* To 32MB for anything above that */
-return 32*1024*1024;
+/* To 64MB for anything above that */
+return 64*1024*1024;
}

#define MAX_RESOURCE_SIZE ((resource_size_t)-1)
@@ -206,8 +206,11 @@ static int __init setup_early_printk(char *buf)

while (*buf != '\0') {
if (!strncmp(buf, "serial", 6)) {
-early_serial_init(buf + 6);
+buf += 6;
+early_serial_init(buf);
early_console_register(&early_serial_console, keep);
+if (!strncmp(buf, ",ttyS", 5))
+buf += 5;
}
if (!strncmp(buf, "ttyS", 4)) {
early_serial_init(buf + 4);
@@ -454,8 +454,10 @@ void __init efi_init(void)
if (add_efi_memmap)
do_add_efi_memmap();

+#ifdef CONFIG_X86_32
x86_platform.get_wallclock = efi_get_time;
x86_platform.set_wallclock = efi_set_rtc_mmss;
+#endif

/* Setup for EFI runtime service */
reboot_type = BOOT_EFI;
@@ -15,8 +15,10 @@ EXPORT_SYMBOL(mcount);
 * the export, but dont use it from C code, it is used
 * by assembly code and is not using C calling convention!
 */
+#ifndef CONFIG_X86_CMPXCHG64
extern void cmpxchg8b_emu(void);
EXPORT_SYMBOL(cmpxchg8b_emu);
+#endif

/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy_generic);
@@ -63,10 +63,10 @@ static int show_other_interrupts(struct seq_file *p, int prec)
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
seq_printf(p, " Spurious interrupts\n");
-seq_printf(p, "%*s: ", prec, "CNT");
+seq_printf(p, "%*s: ", prec, "PMI");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
-seq_printf(p, " Performance counter interrupts\n");
+seq_printf(p, " Performance monitoring interrupts\n");
seq_printf(p, "%*s: ", prec, "PND");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
@@ -317,6 +317,12 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
return UCODE_NFOUND;
}

+if (*(u32 *)firmware->data != UCODE_MAGIC) {
+printk(KERN_ERR "microcode: invalid UCODE_MAGIC (0x%08x)\n",
+*(u32 *)firmware->data);
+return UCODE_ERROR;
+}
+
ret = generic_load_microcode(cpu, firmware->data, firmware->size);

release_firmware(firmware);
@@ -35,7 +35,7 @@ int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
- * If this variable is 1, IOMMU implementations do no DMA ranslation for
+ * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access to whole physical memory. This is
 * useful if a user want to use an IOMMU only for KVM device assignment to
 * guests and not for driver dma translation.
@@ -45,12 +45,10 @@ int iommu_pass_through __read_mostly;
dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

-/* Dummy device used for NULL arguments (normally ISA). Better would
-be probably a smaller DMA mask, but this is bug-to-bug compatible
-to older i386. */
+/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
.init_name = "fallback device",
-.coherent_dma_mask = DMA_BIT_MASK(32),
+.coherent_dma_mask = ISA_DMA_BIT_MASK,
.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
@@ -311,7 +309,7 @@ void pci_iommu_shutdown(void)
amd_iommu_shutdown();
}
/* Must execute after PCI subsystem */
-fs_initcall(pci_iommu_init);
+rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
@@ -16,6 +16,7 @@
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
+#include <linux/sched.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
@@ -664,3 +664,8 @@ long sys_arch_prctl(int code, unsigned long addr)
return do_arch_prctl(current, code, addr);
}

+unsigned long KSTK_ESP(struct task_struct *task)
+{
+return (test_tsk_thread_flag(task, TIF_IA32)) ?
+(task_pt_regs(task)->sp) : ((task)->thread.usersp);
+}
@@ -4,6 +4,7 @@
#include <linux/pm.h>
#include <linux/efi.h>
#include <linux/dmi.h>
+#include <linux/sched.h>
#include <linux/tboot.h>
#include <acpi/reboot.h>
#include <asm/io.h>
@@ -435,6 +436,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
},
},
+{ /* Handle problems with rebooting on Apple Macmini3,1 */
+.callback = set_pci_reboot,
+.ident = "Apple Macmini3,1",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
+},
+},
{ }
};

@@ -659,6 +659,13 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
},
},
+{
+.callback = dmi_low_memory_corruption,
+.ident = "Phoenix/MSC BIOS",
+.matches = {
+DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"),
+},
+},
{
/*
 * AMI BIOS with low memory corruption was found on Intel DG45ID board.
@@ -38,7 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
#ifdef CONFIG_FRAME_POINTER
return *(unsigned long *)(regs->bp + sizeof(long));
#else
-unsigned long *sp = (unsigned long *)regs->sp;
+unsigned long *sp =
+(unsigned long *)kernel_stack_pointer(regs);
/*
 * Return address is either directly at stack pointer
 * or above a saved flags. Eflags has bits 22-31 zero,
@@ -23,8 +23,6 @@
static struct bau_control **uv_bau_table_bases __read_mostly;
static int uv_bau_retry_limit __read_mostly;

-/* position of pnode (which is nasid>>1): */
-static int uv_nshift __read_mostly;
/* base pnode in this partition */
static int uv_partition_base_pnode __read_mostly;

@@ -723,7 +721,7 @@ uv_activation_descriptor_init(int node, int pnode)
BUG_ON(!adp);

pa = uv_gpa(adp); /* need the real nasid*/
-n = pa >> uv_nshift;
+n = uv_gpa_to_pnode(pa);
m = pa & uv_mmask;

uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
@@ -778,7 +776,7 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 * need the pnode of where the memory was really allocated
 */
pa = uv_gpa(pqp);
-pn = pa >> uv_nshift;
+pn = uv_gpa_to_pnode(pa);
uv_write_global_mmr64(pnode,
UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
@@ -843,8 +841,7 @@ static int __init uv_bau_init(void)
GFP_KERNEL, cpu_to_node(cur_cpu));

uv_bau_retry_limit = 1;
-uv_nshift = uv_hub_info->n_val;
-uv_mmask = (1UL << uv_hub_info->n_val) - 1;
+uv_mmask = (1UL << uv_hub_info->m_val) - 1;
nblades = uv_num_possible_blades();

uv_bau_table_bases = (struct bau_control **)
@@ -3,8 +3,16 @@
#include <asm/trampoline.h>
#include <asm/e820.h>

+#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
+#define __trampinit
+#define __trampinitdata
+#else
+#define __trampinit __cpuinit
+#define __trampinitdata __cpuinitdata
+#endif
+
/* ready for x86_64 and x86 */
-unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE);
+unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);

void __init reserve_trampoline_memory(void)
{
@@ -26,7 +34,7 @@ void __init reserve_trampoline_memory(void)
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
-unsigned long __cpuinit setup_trampoline(void)
+unsigned long __trampinit setup_trampoline(void)
{
memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
return virt_to_phys(trampoline_base);
@@ -32,8 +32,12 @@
#include <asm/segment.h>
#include <asm/processor-flags.h>

+#ifdef CONFIG_ACPI_SLEEP
+.section .rodata, "a", @progbits
+#else
/* We can free up the trampoline after bootup if cpu hotplug is not supported. */
__CPUINITRODATA
+#endif
.code16

ENTRY(trampoline_data)
@@ -648,7 +648,7 @@ static inline int __init activate_vmi(void)

pv_info.paravirt_enabled = 1;
pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
-pv_info.name = "vmi";
+pv_info.name = "vmi [deprecated]";

pv_init_ops.patch = vmi_patch;

@@ -305,6 +305,9 @@ SECTIONS

#ifdef CONFIG_X86_32
+/*
+ * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
+ */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE");
#else