Merge branch 'x86/mm' into core/percpu

Conflicts:
	arch/x86/mm/fault.c

Author: Ingo Molnar
Date:   2009-01-21 10:39:51 +01:00
527 changed files with 8252 additions and 3743 deletions


@@ -56,6 +56,10 @@
 #include "head_booke.h"
 #endif
+#if defined(CONFIG_FSL_BOOKE)
+#include "../mm/mmu_decl.h"
+#endif
 int main(void)
 {
 DEFINE(THREAD, offsetof(struct task_struct, thread));
@@ -382,6 +386,9 @@ int main(void)
 DEFINE(PGD_T_LOG2, PGD_T_LOG2);
 DEFINE(PTE_T_LOG2, PTE_T_LOG2);
 #endif
+#ifdef CONFIG_FSL_BOOKE
+DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
+#endif
 #ifdef CONFIG_KVM_EXIT_TIMING
 DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,


@@ -113,7 +113,7 @@ struct cache {
 struct cache *next_local; /* next cache of >= level */
 };
-static DEFINE_PER_CPU(struct cache_dir *, cache_dir);
+static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);
 /* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
@@ -468,9 +468,9 @@ static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_i
 cache_dir->kobj = kobj;
-WARN_ON_ONCE(per_cpu(cache_dir, cpu_id) != NULL);
+WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);
-per_cpu(cache_dir, cpu_id) = cache_dir;
+per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;
 return cache_dir;
 err:
@@ -820,13 +820,13 @@ void cacheinfo_cpu_offline(unsigned int cpu_id)
 /* Prevent userspace from seeing inconsistent state - remove
 * the sysfs hierarchy first */
-cache_dir = per_cpu(cache_dir, cpu_id);
+cache_dir = per_cpu(cache_dir_pcpu, cpu_id);
 /* careful, sysfs population may have failed */
 if (cache_dir)
 remove_cache_dir(cache_dir);
-per_cpu(cache_dir, cpu_id) = NULL;
+per_cpu(cache_dir_pcpu, cpu_id) = NULL;
 /* clear the CPU's bit in its cache chain, possibly freeing
 * cache objects */


@@ -79,10 +79,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 "Warning: IOMMU offset too big for device mask\n");
 if (tbl)
 printk(KERN_INFO
-"mask: 0x%08lx, table offset: 0x%08lx\n",
+"mask: 0x%08llx, table offset: 0x%08lx\n",
 mask, tbl->it_offset);
 else
-printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+printk(KERN_INFO "mask: 0x%08llx, table unavailable\n",
 mask);
 return 0;
 } else


@@ -1518,6 +1518,15 @@ _GLOBAL(pmac_secondary_start)
 /* turn on 64-bit mode */
 bl .enable_64b_mode
+li r0,0
+mfspr r3,SPRN_HID4
+rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */
+sync
+mtspr SPRN_HID4,r3
+isync
+sync
+slbia
 /* get TOC pointer (real address) */
 bl .relative_toc


@@ -389,10 +389,6 @@ skpinv: addi r6,r6,1 /* Increment */
 #endif
 #endif
-mfspr r3,SPRN_TLB1CFG
-andi. r3,r3,0xfff
-lis r4,num_tlbcam_entries@ha
-stw r3,num_tlbcam_entries@l(r4)
 /*
 * Decide what sort of machine this is and initialize the MMU.
 */
@@ -711,7 +707,7 @@ interrupt_base:
 EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
 #ifdef CONFIG_PPC_E500MC
-EXCEPTION(0x2070, Doorbell, unknown_exception, EXC_XFER_EE)
+EXCEPTION(0x2070, Doorbell, unknown_exception, EXC_XFER_STD)
 #endif
 /* Debug Interrupt */
@@ -909,7 +905,7 @@ KernelSPE:
 _GLOBAL(loadcam_entry)
 lis r4,TLBCAM@ha
 addi r4,r4,TLBCAM@l
-mulli r5,r3,20
+mulli r5,r3,TLBCAM_SIZE
 add r3,r5,r4
 lwz r4,0(r3)
 mtspr SPRN_MAS0,r4


@@ -239,12 +239,12 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 if (printk_ratelimit()) {
 printk(KERN_INFO "iommu_free: invalid entry\n");
 printk(KERN_INFO "\tentry = 0x%lx\n", entry);
-printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
-printk(KERN_INFO "\tTable = 0x%lx\n", (u64)tbl);
-printk(KERN_INFO "\tbus# = 0x%lx\n", (u64)tbl->it_busno);
-printk(KERN_INFO "\tsize = 0x%lx\n", (u64)tbl->it_size);
-printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
-printk(KERN_INFO "\tindex = 0x%lx\n", (u64)tbl->it_index);
+printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
+printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl);
+printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno);
+printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size);
+printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
+printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
 WARN_ON(1);
 }
 return;


@@ -240,7 +240,7 @@ static void parse_ppp_data(struct seq_file *m)
 if (rc)
 return;
-seq_printf(m, "partition_entitled_capacity=%ld\n",
+seq_printf(m, "partition_entitled_capacity=%lld\n",
 ppp_data.entitlement);
 seq_printf(m, "group=%d\n", ppp_data.group_num);
 seq_printf(m, "system_active_processors=%d\n",
@@ -265,7 +265,7 @@ static void parse_ppp_data(struct seq_file *m)
 ppp_data.unallocated_weight);
 seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
 seq_printf(m, "capped=%d\n", ppp_data.capped);
-seq_printf(m, "unallocated_capacity=%ld\n",
+seq_printf(m, "unallocated_capacity=%lld\n",
 ppp_data.unallocated_entitlement);
 }
@@ -509,10 +509,10 @@ static ssize_t update_ppp(u64 *entitlement, u8 *weight)
 } else
 return -EINVAL;
-pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
+pr_debug("%s: current_entitled = %llu, current_weight = %u\n",
 __func__, ppp_data.entitlement, ppp_data.weight);
-pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
+pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
 __func__, new_entitled, new_weight);
 retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
@@ -558,7 +558,7 @@ static ssize_t update_mpp(u64 *entitlement, u8 *weight)
 pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
 __func__, mpp_data.entitled_mem, mpp_data.mem_weight);
-pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
+pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
 __func__, new_entitled, new_weight);
 rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);


@@ -93,10 +93,35 @@ void __init reserve_crashkernel(void)
 KDUMP_KERNELBASE);
 crashk_res.start = KDUMP_KERNELBASE;
+#else
+if (!crashk_res.start) {
+/*
+ * unspecified address, choose a region of specified size
+ * can overlap with initrd (ignoring corruption when retained)
+ * ppc64 requires kernel and some stacks to be in first segemnt
+ */
+crashk_res.start = KDUMP_KERNELBASE;
+}
+crash_base = PAGE_ALIGN(crashk_res.start);
+if (crash_base != crashk_res.start) {
+printk("Crash kernel base must be aligned to 0x%lx\n",
+PAGE_SIZE);
+crashk_res.start = crash_base;
+}
+#endif
 crash_size = PAGE_ALIGN(crash_size);
 crashk_res.end = crashk_res.start + crash_size - 1;
+/* The crash region must not overlap the current kernel */
+if (overlaps_crashkernel(__pa(_stext), _end - _stext)) {
+printk(KERN_WARNING
+"Crash kernel can not overlap current kernel\n");
+crashk_res.start = crashk_res.end = 0;
+return;
+}
 /* Crash kernel trumps memory limit */
 if (memory_limit && memory_limit <= crashk_res.end) {
 memory_limit = crashk_res.end + 1;


@@ -470,7 +470,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
 if (bus->self) {
 pr_debug("IO mapping for PCI-PCI bridge %s\n",
 pci_name(bus->self));
-pr_debug(" virt=0x%016lx...0x%016lx\n",
+pr_debug(" virt=0x%016llx...0x%016llx\n",
 bus->resource[0]->start + _IO_BASE,
 bus->resource[0]->end + _IO_BASE);
 return 0;
@@ -502,7 +502,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
 hose->io_base_phys - phys_page);
 pr_debug("IO mapping for PHB %s\n", hose->dn->full_name);
-pr_debug(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
+pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n",
 hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
 pr_debug(" size=0x%016lx (alloc=0x%016lx)\n",
 hose->pci_io_size, size_page);
@@ -517,7 +517,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
 hose->io_resource.start += io_virt_offset;
 hose->io_resource.end += io_virt_offset;
-pr_debug(" hose->io_resource=0x%016lx...0x%016lx\n",
+pr_debug(" hose->io_resource=0x%016llx...0x%016llx\n",
 hose->io_resource.start, hose->io_resource.end);
 return 0;


@@ -590,6 +590,11 @@ static void __init check_cpu_slb_size(unsigned long node)
 {
 u32 *slb_size_ptr;
+slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
+if (slb_size_ptr != NULL) {
+mmu_slb_size = *slb_size_ptr;
+return;
+}
 slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
 if (slb_size_ptr != NULL) {
 mmu_slb_size = *slb_size_ptr;


@@ -434,8 +434,8 @@ void __init setup_system(void)
 printk("Starting Linux PPC64 %s\n", init_utsname()->version);
 printk("-----------------------------------------------------\n");
-printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
-printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size());
+printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
+printk("physicalMemorySize = 0x%llx\n", lmb_phys_mem_size());
 if (ppc64_caches.dline_size != 0x80)
 printk("ppc64_caches.dcache_line_size = 0x%x\n",
 ppc64_caches.dline_size);
@@ -493,7 +493,7 @@ static void __init emergency_stack_init(void)
 * bringup, we need to get at them in real mode. This means they
 * must also be within the RMO region.
 */
-limit = min(0x10000000UL, lmb.rmo_size);
+limit = min(0x10000000ULL, lmb.rmo_size);
 for_each_possible_cpu(i) {
 unsigned long sp;


@@ -87,7 +87,9 @@ SECTIONS
 /* The dummy segment contents for the bug workaround mentioned above
 near PHDRS. */
 .dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
-LONG(0xf177)
+LONG(0)
+LONG(0)
+LONG(0)
 } :kernel :dummy
 /*