arm64: add early_ioremap support
Add support for early IO or memory mappings which are needed before the
normal ioremap() is usable. This also adds fixmap support for permanent
fixed mappings such as that used by the earlyprintk device register region.

Signed-off-by: Mark Salter <msalter@redhat.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Borislav Petkov <borislav.petkov@amd.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Committed by: Linus Torvalds
Parent: 0bf757c73d
Commit: bf4b558eba
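For context, the early_ioremap()/early_iounmap() pair that this infrastructure backs lets boot-time code map device registers before the normal ioremap() path is available. The sketch below is illustrative only and is not part of the patch: UART_PHYS_BASE, UART_REG_SIZE and early_uart_poke() are hypothetical names, and it assumes the generic early_ioremap() API is wired up for the platform.

/*
 * Illustrative sketch only (not part of this patch).  UART_PHYS_BASE,
 * UART_REG_SIZE and early_uart_poke() are hypothetical; the point is
 * simply that early_ioremap()/early_iounmap() can be used before the
 * normal ioremap() path is usable.
 */
#include <linux/init.h>
#include <linux/io.h>

#define UART_PHYS_BASE	0x09000000UL	/* hypothetical device base */
#define UART_REG_SIZE	0x1000UL

static void __init early_uart_poke(void)
{
	void __iomem *base;

	base = early_ioremap(UART_PHYS_BASE, UART_REG_SIZE);
	if (!base)
		return;

	writel(0x1, base);			/* hypothetical enable bit */
	early_iounmap(base, UART_REG_SIZE);	/* tear down the temporary mapping */
}

Mappings made this way are strictly temporary; once the normal ioremap() path is up, callers are expected to switch over to it.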
@@ -25,6 +25,10 @@
 #include <linux/vmalloc.h>
 #include <linux/io.h>
 
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
 static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
 				      pgprot_t prot, void *caller)
 {
@@ -98,3 +102,84 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
+
+#ifndef CONFIG_ARM64_64K_PAGES
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#endif
+
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+
+	pgd = pgd_offset_k(addr);
+	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
+
+	pud = pud_offset(pgd, addr);
+	BUG_ON(pud_none(*pud) || pud_bad(*pud));
+
+	return pmd_offset(pud, addr);
+}
+
+static inline pte_t * __init early_ioremap_pte(unsigned long addr)
+{
+	pmd_t *pmd = early_ioremap_pmd(addr);
+
+	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
+
+	return pte_offset_kernel(pmd, addr);
+}
+
+void __init early_ioremap_init(void)
+{
+	pmd_t *pmd;
+
+	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+#ifndef CONFIG_ARM64_64K_PAGES
+	/* need to populate pmd for 4k pagesize only */
+	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+#endif
+	/*
+	 * The boot-ioremap range spans multiple pmds, for which
+	 * we are not prepared:
+	 */
+	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
+
+	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
+		WARN_ON(1);
+		pr_warn("pmd %p != %p\n",
+			pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
+		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+			fix_to_virt(FIX_BTMAP_BEGIN));
+		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
+			fix_to_virt(FIX_BTMAP_END));
+
+		pr_warn("FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
+		pr_warn("FIX_BTMAP_BEGIN: %d\n",
+			FIX_BTMAP_BEGIN);
+	}
+
+	early_ioremap_setup();
+}
+
+void __init __early_set_fixmap(enum fixed_addresses idx,
+			       phys_addr_t phys, pgprot_t flags)
+{
+	unsigned long addr = __fix_to_virt(idx);
+	pte_t *pte;
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+
+	pte = early_ioremap_pte(addr);
+
+	if (pgprot_val(flags))
+		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
+	else {
+		pte_clear(&init_mm, addr, pte);
+		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
+	}
+}