Merge branches 'arnd-fixes', 'clk', 'misc', 'v7' and 'fixes' into for-next
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void)
 		}
 
 		/*
-		 * Find the first non-section-aligned page, and point
+		 * Find the first non-pmd-aligned page, and point
 		 * memblock_limit at it. This relies on rounding the
-		 * limit down to be section-aligned, which happens at
-		 * the end of this function.
+		 * limit down to be pmd-aligned, which happens at the
+		 * end of this function.
 		 *
 		 * With this algorithm, the start or end of almost any
-		 * bank can be non-section-aligned. The only exception
-		 * is that the start of the bank 0 must be section-
+		 * bank can be non-pmd-aligned. The only exception is
+		 * that the start of the bank 0 must be section-
 		 * aligned, since otherwise memory would need to be
 		 * allocated when mapping the start of bank 0, which
 		 * occurs before any free memory is mapped.
 		 */
 		if (!memblock_limit) {
-			if (!IS_ALIGNED(block_start, SECTION_SIZE))
+			if (!IS_ALIGNED(block_start, PMD_SIZE))
 				memblock_limit = block_start;
-			else if (!IS_ALIGNED(block_end, SECTION_SIZE))
+			else if (!IS_ALIGNED(block_end, PMD_SIZE))
 				memblock_limit = arm_lowmem_limit;
 		}
 
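To make the new check concrete, here is a minimal user-space sketch of the
detection logic in this hunk. The 2 MiB PMD_SIZE and the block addresses are
assumptions for illustration; in the kernel they come from the pgtable
headers and the actual memblock layout.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed geometry: a 2 MiB Linux pmd, as on 32-bit ARM with 4K pages. */
    #define PMD_SIZE		(2UL * 1024 * 1024)
    #define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

    int main(void)
    {
    	/* Hypothetical lowmem block: starts pmd-aligned, ends mid-pmd. */
    	uint32_t block_start = 0x80000000;	/* 2 MiB aligned */
    	uint32_t block_end = 0x8f700000;	/* only 1 MiB aligned */
    	uint32_t arm_lowmem_limit = block_end;
    	uint32_t memblock_limit = 0;

    	if (!memblock_limit) {
    		if (!IS_ALIGNED(block_start, PMD_SIZE))
    			memblock_limit = block_start;
    		else if (!IS_ALIGNED(block_end, PMD_SIZE))
    			memblock_limit = arm_lowmem_limit;
    	}

    	/* Prints 0x8f700000: the first non-pmd-aligned boundary. */
    	printf("memblock_limit = 0x%08x\n", memblock_limit);
    	return 0;
    }

A block end of 0x8f700000 passes the old SECTION_SIZE (1 MiB) check but fails
the PMD_SIZE (2 MiB) one, which is exactly the case the rewritten comment
describes.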
@@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void)
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
 
 	/*
-	 * Round the memblock limit down to a section size. This
+	 * Round the memblock limit down to a pmd size. This
 	 * helps to ensure that we will allocate memory from the
-	 * last full section, which should be mapped.
+	 * last full pmd, which should be mapped.
 	 */
 	if (memblock_limit)
-		memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+		memblock_limit = round_down(memblock_limit, PMD_SIZE);
 	if (!memblock_limit)
 		memblock_limit = arm_lowmem_limit;
 
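Why pmd rather than section granularity: on 32-bit ARM without LPAE a section
is 1 MiB but a Linux pmd covers two sections (2 MiB), and the lowmem mapping
is populated a pmd at a time, so rounding the limit to SECTION_SIZE could
leave it inside a half-mapped pmd. A hedged sketch of the difference, using
the usual non-LPAE shift values and the block end from the example above:

    #include <stdio.h>

    /* Assumed 32-bit non-LPAE ARM geometry: 1 MiB sections, 2 MiB pmds. */
    #define SECTION_SIZE	(1UL << 20)
    #define PMD_SIZE		(1UL << 21)
    /* Simplified round_down(); valid for power-of-two alignments only. */
    #define round_down(x, y)	((x) & ~((y) - 1))

    int main(void)
    {
    	unsigned long limit = 0x8f700000;

    	/* Old: already 1 MiB aligned, stays 0x8f700000 (mid-pmd). */
    	printf("section: 0x%08lx\n", round_down(limit, SECTION_SIZE));
    	/* New: rounds to 0x8f600000, the last fully mapped pmd boundary. */
    	printf("pmd:     0x%08lx\n", round_down(limit, PMD_SIZE));
    	return 0;
    }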
@@ -1387,123 +1387,98 @@ static void __init map_lowmem(void)
 	}
 }
 
-#ifdef CONFIG_ARM_LPAE
+#ifdef CONFIG_ARM_PV_FIXUP
+
+extern unsigned long __atags_pointer;
+typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata);
+pgtables_remap lpae_pgtables_remap_asm;
+
 /*
  * early_paging_init() recreates boot time page table setup, allowing machines
  * to switch over to a high (>4G) address space on LPAE systems
  */
-void __init early_paging_init(const struct machine_desc *mdesc,
-			      struct proc_info_list *procinfo)
+void __init early_paging_init(const struct machine_desc *mdesc)
 {
-	pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
-	unsigned long map_start, map_end;
-	pgd_t *pgd0, *pgdk;
-	pud_t *pud0, *pudk, *pud_start;
-	pmd_t *pmd0, *pmdk;
-	phys_addr_t phys;
-	int i;
+	pgtables_remap *lpae_pgtables_remap;
+	unsigned long pa_pgd;
+	unsigned int cr, ttbcr;
+	long long offset;
+	void *boot_data;
 
-	if (!(mdesc->init_meminfo))
+	if (!mdesc->pv_fixup)
 		return;
 
-	/* remap kernel code and data */
-	map_start = init_mm.start_code & PMD_MASK;
-	map_end = ALIGN(init_mm.brk, PMD_SIZE);
+	offset = mdesc->pv_fixup();
+	if (offset == 0)
+		return;
 
-	/* get a handle on things... */
-	pgd0 = pgd_offset_k(0);
-	pud_start = pud0 = pud_offset(pgd0, 0);
-	pmd0 = pmd_offset(pud0, 0);
+	/*
+	 * Get the address of the remap function in the 1:1 identity
+	 * mapping setup by the early page table assembly code. We
+	 * must get this prior to the pv update. The following barrier
+	 * ensures that this is complete before we fixup any P:V offsets.
+	 */
+	lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
+	pa_pgd = __pa(swapper_pg_dir);
+	boot_data = __va(__atags_pointer);
+	barrier();
 
-	pgdk = pgd_offset_k(map_start);
-	pudk = pud_offset(pgdk, map_start);
-	pmdk = pmd_offset(pudk, map_start);
+	pr_info("Switching physical address space to 0x%08llx\n",
+		(u64)PHYS_OFFSET + offset);
 
-	mdesc->init_meminfo();
+	/* Re-set the phys pfn offset, and the pv offset */
+	__pv_offset += offset;
+	__pv_phys_pfn_offset += PFN_DOWN(offset);
 
 	/* Run the patch stub to update the constants */
 	fixup_pv_table(&__pv_table_begin,
 		(&__pv_table_end - &__pv_table_begin) << 2);
 
-	/*
-	 * Cache cleaning operations for self-modifying code
-	 * We should clean the entries by MVA but running a
-	 * for loop over every pv_table entry pointer would
-	 * just complicate the code.
-	 */
-	flush_cache_louis();
-	dsb(ishst);
-	isb();
-
-	/*
-	 * FIXME: This code is not architecturally compliant: we modify
-	 * the mappings in-place, indeed while they are in use by this
-	 * very same code. This may lead to unpredictable behaviour of
-	 * the CPU.
-	 *
-	 * Even modifying the mappings in a separate page table does
-	 * not resolve this.
-	 *
-	 * The architecture strongly recommends that when a mapping is
-	 * changed, that it is changed by first going via an invalid
-	 * mapping and back to the new mapping. This is to ensure that
-	 * no TLB conflicts (caused by the TLB having more than one TLB
-	 * entry match a translation) can occur. However, doing that
-	 * here will result in unmapping the code we are running.
-	 */
-	pr_warn("WARNING: unsafe modification of in-place page tables - tainting kernel\n");
-	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
-
-	/*
-	 * Remap level 1 table. This changes the physical addresses
-	 * used to refer to the level 2 page tables to the high
-	 * physical address alias, leaving everything else the same.
-	 */
-	for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
-		set_pud(pud0,
-			__pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
-		pmd0 += PTRS_PER_PMD;
-	}
-
-	/*
-	 * Remap the level 2 table, pointing the mappings at the high
-	 * physical address alias of these pages.
-	 */
-	phys = __pa(map_start);
-	do {
-		*pmdk++ = __pmd(phys | pmdprot);
-		phys += PMD_SIZE;
-	} while (phys < map_end);
-
 	/*
-	 * Ensure that the above updates are flushed out of the cache.
-	 * This is not strictly correct; on a system where the caches
-	 * are coherent with each other, but the MMU page table walks
-	 * may not be coherent, flush_cache_all() may be a no-op, and
-	 * this will fail.
+	 * We changing not only the virtual to physical mapping, but also
+	 * the physical addresses used to access memory. We need to flush
+	 * all levels of cache in the system with caching disabled to
+	 * ensure that all data is written back, and nothing is prefetched
+	 * into the caches. We also need to prevent the TLB walkers
+	 * allocating into the caches too. Note that this is ARMv7 LPAE
+	 * specific.
 	 */
+	cr = get_cr();
+	set_cr(cr & ~(CR_I | CR_C));
+	asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
+	asm volatile("mcr p15, 0, %0, c2, c0, 2"
+		: : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
 	flush_cache_all();
 
 	/*
-	 * Re-write the TTBR values to point them at the high physical
-	 * alias of the page tables. We expect __va() will work on
-	 * cpu_get_pgd(), which returns the value of TTBR0.
+	 * Fixup the page tables - this must be in the idmap region as
+	 * we need to disable the MMU to do this safely, and hence it
+	 * needs to be assembly. It's fairly simple, as we're using the
+	 * temporary tables setup by the initial assembly code.
 	 */
-	cpu_switch_mm(pgd0, &init_mm);
-	cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
+	lpae_pgtables_remap(offset, pa_pgd, boot_data);
 
-	/* Finally flush any stale TLB values. */
-	local_flush_bp_all();
-	local_flush_tlb_all();
+	/* Re-enable the caches and cacheable TLB walks */
+	asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
+	set_cr(cr);
 }
 
 #else
 
-void __init early_paging_init(const struct machine_desc *mdesc,
-			      struct proc_info_list *procinfo)
+void __init early_paging_init(const struct machine_desc *mdesc)
 {
-	if (mdesc->init_meminfo)
-		mdesc->init_meminfo();
+	long long offset;
+
+	if (!mdesc->pv_fixup)
+		return;
+	offset = mdesc->pv_fixup();
+	if (offset == 0)
+		return;
+
+	pr_crit("Physical address space modification is only to support Keystone2.\n");
+	pr_crit("Please enable ARM_LPAE and ARM_PATCH_PHYS_VIRT support to use this\n");
+	pr_crit("feature. Your kernel may crash now, have a good day.\n");
+	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
 }
 
 #endif
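For context on the new hook: pv_fixup replaces the old init_meminfo callback
in struct machine_desc and returns the physical address offset the kernel
should switch to (zero means no switch). A hedged sketch of a platform-side
user, modelled on TI Keystone 2, the only user of this path; the alias
addresses and names below are illustrative assumptions, not taken from this
diff:

    /* Hypothetical Keystone2-style platform with RAM visible at two
     * physical aliases: a low 32-bit one and a high one above 4G. */
    #define EXAMPLE_LOW_PHYS_START	0x080000000ULL	/* assumed */
    #define EXAMPLE_HIGH_PHYS_START	0x800000000ULL	/* assumed */

    static long long __init example_pv_fixup(void)
    {
    	/* Ask early_paging_init() to move the kernel's view of RAM
    	 * from the low alias to the high alias. */
    	return EXAMPLE_HIGH_PHYS_START - EXAMPLE_LOW_PHYS_START;
    }

    DT_MACHINE_START(EXAMPLE, "Keystone2-style example")
    	/* ... */
    	.pv_fixup	= example_pv_fixup,
    MACHINE_END

early_paging_init() then applies the returned offset to the P:V translation
variables, patches the inlined __pv_table stubs, and calls the assembly
remap stub through its identity-mapped physical address with the MMU off.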
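The offset bookkeeping in the hunk above is plain arithmetic; a worked
example under the same assumed aliases:

    #include <stdio.h>

    #define PAGE_SHIFT	12			/* assumed 4 KiB pages */
    #define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

    int main(void)
    {
    	/* 0x800000000 - 0x080000000 = 0x780000000 bytes */
    	long long offset = 0x800000000LL - 0x080000000LL;

    	printf("__pv_offset          += 0x%llx\n", offset);
    	printf("__pv_phys_pfn_offset += 0x%llx\n", PFN_DOWN(offset));
    	return 0;
    }

This prints 0x780000000 and 0x780000: the byte offset feeds the patched
virt-to-phys translation, while the pfn offset advances by whole pages.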
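On the cache-off sequence: the mask ~(3 << 8 | 3 << 10) written to TTBCR
clears the TTBR0 walk cacheability fields. In the LPAE TTBCR layout (EAE
set), IRGN0 occupies bits 9:8 and ORGN0 bits 11:10; zeroing both makes
translation table walks non-cacheable, so with the SCTLR C and I bits also
cleared neither data accesses nor walks can allocate into the caches while
flush_cache_all() drains them. A small sketch naming the fields (the macro
names are illustrative, not the kernel's):

    /* LPAE TTBCR fields (ARMv7-A, EAE == 1); names are illustrative. */
    #define TTBCR_IRGN0_MASK	(3U << 8)	/* inner walk cacheability */
    #define TTBCR_ORGN0_MASK	(3U << 10)	/* outer walk cacheability */

    /* 0b00 in both fields selects non-cacheable TTBR0 walks. */
    static inline unsigned int ttbcr_uncached_walks(unsigned int ttbcr)
    {
    	return ttbcr & ~(TTBCR_IRGN0_MASK | TTBCR_ORGN0_MASK);
    }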