Merge branch 'master' into sh/smp

Conflicts:
	arch/sh/mm/cache-sh4.c
Author: Paul Mundt
Date:   2009-09-01 13:54:14 +09:00

414 changed files with 15153 additions and 2589 deletions


@@ -82,7 +82,7 @@ config 32BIT
 config PMB_ENABLE
 	bool "Support 32-bit physical addressing through PMB"
-	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
+	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
 	select 32BIT
 	default y
 	help
@@ -97,7 +97,7 @@ choice
 config PMB
 	bool "PMB"
-	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
+	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
 	select 32BIT
 	help
 	  If you say Y here, physical addressing will be extended to
@@ -106,7 +106,8 @@ config PMB
 config PMB_FIXED
 	bool "fixed PMB"
-	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || \
+	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || \
+				   CPU_SUBTYPE_SH7780 || \
 				   CPU_SUBTYPE_SH7785)
 	select 32BIT
 	help


@@ -455,7 +455,49 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
  * Break the 1, 2 and 4 way variants of this out into separate functions to
  * avoid nearly all the overhead of having the conditional stuff in the function
  * bodies (+ the 1 and 2 way cases avoid saving any registers too).
+ *
+ * We want to eliminate unnecessary bus transactions, so this code uses
+ * a non-obvious technique.
+ *
+ * Loop over a cache way sized block of memory, one cache line at a time. For
+ * each line, use movca.l to cause the current cache line contents to be
+ * written back, but without reading anything from main memory. However this
+ * has the side effect that the cache is now caching that memory location. So
+ * follow this with a cache invalidate to mark the cache line invalid. And do
+ * all this with interrupts disabled, to avoid the cache line being
+ * accidentally evicted while it is holding garbage.
+ *
+ * This also breaks in a number of circumstances:
+ * - if there are modifications to the region of memory just above
+ *   empty_zero_page (for example because a breakpoint has been placed
+ *   there), then these can be lost.
+ *
+ *   This is because the memory address which the cache temporarily
+ *   caches in the above description is empty_zero_page. So the
+ *   movca.l hits the cache (it is assumed that it misses, or at least
+ *   isn't dirty), modifies the line and then invalidates it, losing the
+ *   required change.
+ *
+ * - If caches are disabled or configured in write-through mode, then
+ *   the movca.l writes garbage directly into memory.
 */
+static void __flush_dcache_segment_writethrough(unsigned long start,
+						unsigned long extent_per_way)
+{
+	unsigned long addr;
+	int i;
+
+	addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask);
+
+	while (extent_per_way) {
+		for (i = 0; i < cpu_data->dcache.ways; i++)
+			__raw_writel(0, addr + cpu_data->dcache.way_incr * i);
+
+		addr += cpu_data->dcache.linesz;
+		extent_per_way -= cpu_data->dcache.linesz;
+	}
+}
+
 static void __flush_dcache_segment_1way(unsigned long start,
 					unsigned long extent_per_way)
 {
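The comment above describes the movca.l/ocbi trick only in prose; the hand-tuned 1-, 2- and 4-way flush bodies sit outside this hunk. As a rough illustration of the technique under stated assumptions (SR bit 28 is the SH-4 BL interrupt-block bit, empty_zero_page is the scratch target the comment names, single way, no register tuning), the inner loop might look like the sketch below. It is not the commit's code.

	/*
	 * Sketch only -- assumes <asm/processor.h> and <asm/page.h> for
	 * boot_cpu_data and empty_zero_page.
	 */
	static void flush_dcache_way_sketch(unsigned long extent_per_way)
	{
		unsigned long orig_sr, sr_with_bl, linesz;
		unsigned long a0, a0e;

		asm volatile("stc sr, %0" : "=r" (orig_sr));
		sr_with_bl = orig_sr | (1 << 28);	/* SR.BL: block interrupts */

		linesz = boot_cpu_data.dcache.linesz;
		a0  = (unsigned long)&empty_zero_page[0];
		a0e = a0 + extent_per_way;

		do {
			asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
			/* movca.l evicts (writes back) the victim line without
			 * a memory read; ocbi then invalidates the line now
			 * holding garbage. */
			asm volatile("movca.l r0, @%0\n\t"
				     "ocbi @%0"
				     : : "r" (a0) : "memory");
			asm volatile("ldc %0, sr" : : "r" (orig_sr));
			a0 += linesz;
		} while (a0 < a0e);
	}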
@@ -655,24 +697,30 @@ extern void __weak sh4__flush_region_init(void);
  */
 void __init sh4_cache_init(void)
 {
+	unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);
+
 	printk("PVR=%08x CVR=%08x PRR=%08x\n",
 		ctrl_inl(CCN_PVR),
 		ctrl_inl(CCN_CVR),
 		ctrl_inl(CCN_PRR));
 
-	switch (boot_cpu_data.dcache.ways) {
-	case 1:
-		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
-		break;
-	case 2:
-		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
-		break;
-	case 4:
-		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
-		break;
-	default:
-		panic("unknown number of cache ways\n");
-		break;
+	if (wt_enabled)
+		__flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
+	else {
+		switch (boot_cpu_data.dcache.ways) {
+		case 1:
+			__flush_dcache_segment_fn = __flush_dcache_segment_1way;
+			break;
+		case 2:
+			__flush_dcache_segment_fn = __flush_dcache_segment_2way;
+			break;
+		case 4:
+			__flush_dcache_segment_fn = __flush_dcache_segment_4way;
+			break;
+		default:
+			panic("unknown number of cache ways\n");
+			break;
+		}
 	}
 
 	local_flush_icache_range = sh4_flush_icache_range;
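Why a write-through cache can take the simpler path selected here: with CCR.WT set no line is ever dirty, so a flush degenerates to invalidation, which __flush_dcache_segment_writethrough() performs by storing 0 into each operand-cache address-array entry. A per-line helper with the same effect might look like the sketch below; the helper name is hypothetical, and the V/U bit reading follows the SH-4 manual rather than anything shown in this diff.

	/*
	 * Hypothetical helper: drop one dcache line in one way through the
	 * memory-mapped OC address array. A store of 0 leaves V (valid) and
	 * U (dirty) clear, invalidating the line with no write-back -- safe
	 * only because write-through mode never holds dirty data.
	 */
	static inline void oc_invalidate_line(unsigned long start, int way)
	{
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY |
				     (start & cpu_data->dcache.entry_mask) |
				     (way * cpu_data->dcache.way_incr);

		__raw_writel(0, addr);
	}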


@@ -57,14 +57,6 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (is_pci_memory_fixed_range(phys_addr, size))
 		return (void __iomem *)phys_addr;
 
-#if !defined(CONFIG_PMB_FIXED)
-	/*
-	 * Don't allow anybody to remap normal RAM that we're using..
-	 */
-	if (phys_addr < virt_to_phys(high_memory))
-		return NULL;
-#endif
-
 	/*
 	 * Mappings have to be page-aligned
 	 */
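For reference, the guard removed above encoded one invariant: any physical address below the top of the kernel's direct lowmem mapping is normal RAM already in use and must not be remapped. A standalone restatement, with a hypothetical helper name, would be:

	/* Hypothetical restatement of the removed check: high_memory marks
	 * the end of the kernel's directly mapped RAM, so anything below
	 * virt_to_phys(high_memory) is RAM the kernel is already using.
	 */
	static inline int remaps_lowmem_ram(unsigned long phys_addr)
	{
		return phys_addr < virt_to_phys(high_memory);
	}

Note that with CONFIG_PMB_FIXED the check was already compiled out, which is consistent with dropping it entirely here.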


@@ -43,9 +43,12 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 	 */
 	ctrl_outl(pte.pte_high, MMU_PTEA);
 #else
-	if (cpu_data->flags & CPU_HAS_PTEA)
-		/* TODO: make this look less hacky */
-		ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
+	if (cpu_data->flags & CPU_HAS_PTEA) {
+		/* The upper 3 bits and the lowest bit of pteval contain
+		 * the PTEA timing control and space attribute bits
+		 */
+		ctrl_outl(copy_ptea_attributes(pteval), MMU_PTEA);
+	}
 #endif
 	/* Set PTEL register */
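copy_ptea_attributes() itself is not shown in this hunk. Judging purely from the open-coded expression it replaces, an equivalent definition would be the following sketch, not necessarily the helper as committed:

	/* Sketch derived from the replaced expression: move pteval's upper
	 * three bits (31:29) into PTEA bits 3:1 (timing control) and keep
	 * bit 0 (the space attribute bit) in place.
	 */
	static inline unsigned long copy_ptea_attributes(unsigned long pteval)
	{
		return ((pteval >> 28) & 0xe) | (pteval & 0x1);
	}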