Merge branch 'core-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 EFI update from Peter Anvin:
 "EFI tree, from Matt Fleming.  Most of the patches are the new efivarfs
  filesystem by Matt Garrett & co.  The balance are support for EFI
  wallclock in the absence of a hardware-specific driver, and various
  fixes and cleanups."

* 'core-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  efivarfs: Make efivarfs_fill_super() static
  x86, efi: Check table header length in efi_bgrt_init()
  efivarfs: Use query_variable_info() to limit kmalloc()
  efivarfs: Fix return value of efivarfs_file_write()
  efivarfs: Return a consistent error when efivarfs_get_inode() fails
  efivarfs: Make 'datasize' unsigned long
  efivarfs: Add unique magic number
  efivarfs: Replace magic number with sizeof(attributes)
  efivarfs: Return an error if we fail to read a variable
  efi: Clarify GUID length calculations
  efivarfs: Implement exclusive access for {get,set}_variable
  efivarfs: efivarfs_fill_super() ensure we clean up correctly on error
  efivarfs: efivarfs_fill_super() ensure we free our temporary name
  efivarfs: efivarfs_fill_super() fix inode reference counts
  efivarfs: efivarfs_create() ensure we drop our reference on inode on error
  efivarfs: efivarfs_file_read ensure we free data in error paths
  x86-64/efi: Use EFI to deal with platform wall clock (again)
  x86/kernel: remove tboot 1:1 page table creation code
  x86, efi: 1:1 pagetable mapping for virtual EFI calls
  x86, mm: Include the entire kernel memory map in trampoline_pgd
  ...
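A quick aside on the "efivarfs: Add unique magic number" commit listed above: userspace can recognize an efivarfs mount by its statfs() f_type. Below is a minimal sketch; the EFIVARFS_MAGIC value and the /sys/firmware/efi/efivars mount point are assumptions of mine to verify against include/uapi/linux/magic.h and your own mount table, not something this merge text states.

/* Sketch: detect an efivarfs mount via its magic number.
 * EFIVARFS_MAGIC and the mount path are assumptions; check them
 * against include/uapi/linux/magic.h in your kernel tree.
 */
#include <stdio.h>
#include <sys/vfs.h>

#ifndef EFIVARFS_MAGIC
#define EFIVARFS_MAGIC 0xde5e81e4 /* assumed value from linux/magic.h */
#endif

int main(void)
{
	struct statfs st;

	if (statfs("/sys/firmware/efi/efivars", &st) != 0) {
		perror("statfs");
		return 1;
	}

	printf("f_type = 0x%lx (%s)\n", (unsigned long)st.f_type,
	       (unsigned long)st.f_type == EFIVARFS_MAGIC ?
	       "efivarfs" : "not efivarfs");
	return 0;
}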
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -108,13 +108,13 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 	for (address = start; address <= end; address += PGDIR_SIZE) {
 		const pgd_t *pgd_ref = pgd_offset_k(address);
 		struct page *page;
+		pgd_t *pgd;
 
 		if (pgd_none(*pgd_ref))
 			continue;
 
 		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
-			pgd_t *pgd;
 			spinlock_t *pgt_lock;
 
 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
@@ -130,6 +130,13 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 
 			spin_unlock(pgt_lock);
 		}
+
+		pgd = __va(real_mode_header->trampoline_pgd);
+		pgd += pgd_index(address);
+
+		if (pgd_none(*pgd))
+			set_pgd(pgd, *pgd_ref);
+
 		spin_unlock(&pgd_lock);
 	}
 }
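The two hunks above hoist the pgd pointer out of the list walk and reuse it to copy each kernel PGD entry into trampoline_pgd as well, so the real-mode trampoline page table tracks the kernel's memory map as it grows. One set_pgd() per PGDIR_SIZE span suffices because a top-level entry only points at a shared subtree of lower-level tables. A freestanding toy model of that sharing (plain userspace C with made-up two-level types, not kernel code):

/* Toy model: copying one top-level entry shares the entire
 * lower-level subtree by reference, which is why the hunk above
 * only touches one pgd_t per PGDIR_SIZE span.
 */
#include <stdio.h>

#define TOP_ENTRIES  4
#define LEAF_ENTRIES 4

typedef struct { long page[LEAF_ENTRIES]; } leaf_table;
typedef struct { leaf_table *entry[TOP_ENTRIES]; } top_table;

int main(void)
{
	static leaf_table shared = { .page = { 100, 101, 102, 103 } };
	top_table kernel_pgd = { .entry = { &shared } };
	top_table trampoline_pgd = { { 0 } };

	/* Analogue of: if (pgd_none(*pgd)) set_pgd(pgd, *pgd_ref); */
	if (!trampoline_pgd.entry[0])
		trampoline_pgd.entry[0] = kernel_pgd.entry[0];

	/* Later updates through the shared leaf are visible via both roots. */
	shared.page[1] = 999;
	printf("kernel sees %ld, trampoline sees %ld\n",
	       kernel_pgd.entry[0]->page[1],
	       trampoline_pgd.entry[0]->page[1]);
	return 0;
}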
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -50,6 +50,107 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
 	return err;
 }
 
+#ifdef CONFIG_X86_64
+static void ident_pte_range(unsigned long paddr, unsigned long vaddr,
+			    pmd_t *ppmd, pmd_t *vpmd, unsigned long end)
+{
+	pte_t *ppte = pte_offset_kernel(ppmd, paddr);
+	pte_t *vpte = pte_offset_kernel(vpmd, vaddr);
+
+	do {
+		set_pte(ppte, *vpte);
+	} while (ppte++, vpte++, vaddr += PAGE_SIZE, vaddr != end);
+}
+
+static int ident_pmd_range(unsigned long paddr, unsigned long vaddr,
+			   pud_t *ppud, pud_t *vpud, unsigned long end)
+{
+	pmd_t *ppmd = pmd_offset(ppud, paddr);
+	pmd_t *vpmd = pmd_offset(vpud, vaddr);
+	unsigned long next;
+
+	do {
+		next = pmd_addr_end(vaddr, end);
+
+		if (!pmd_present(*ppmd)) {
+			pte_t *ppte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+			if (!ppte)
+				return 1;
+
+			set_pmd(ppmd, __pmd(_KERNPG_TABLE | __pa(ppte)));
+		}
+
+		ident_pte_range(paddr, vaddr, ppmd, vpmd, next);
+	} while (ppmd++, vpmd++, vaddr = next, vaddr != end);
+
+	return 0;
+}
+
+static int ident_pud_range(unsigned long paddr, unsigned long vaddr,
+			   pgd_t *ppgd, pgd_t *vpgd, unsigned long end)
+{
+	pud_t *ppud = pud_offset(ppgd, paddr);
+	pud_t *vpud = pud_offset(vpgd, vaddr);
+	unsigned long next;
+
+	do {
+		next = pud_addr_end(vaddr, end);
+
+		if (!pud_present(*ppud)) {
+			pmd_t *ppmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
+			if (!ppmd)
+				return 1;
+
+			set_pud(ppud, __pud(_KERNPG_TABLE | __pa(ppmd)));
+		}
+
+		if (ident_pmd_range(paddr, vaddr, ppud, vpud, next))
+			return 1;
+	} while (ppud++, vpud++, vaddr = next, vaddr != end);
+
+	return 0;
+}
+
+static int insert_identity_mapping(resource_size_t paddr, unsigned long vaddr,
+				   unsigned long size)
+{
+	unsigned long end = vaddr + size;
+	unsigned long next;
+	pgd_t *vpgd, *ppgd;
+
+	/* Don't map over the guard hole. */
+	if (paddr >= 0x800000000000 || paddr + size > 0x800000000000)
+		return 1;
+
+	ppgd = __va(real_mode_header->trampoline_pgd) + pgd_index(paddr);
+
+	vpgd = pgd_offset_k(vaddr);
+	do {
+		next = pgd_addr_end(vaddr, end);
+
+		if (!pgd_present(*ppgd)) {
+			pud_t *ppud = (pud_t *)get_zeroed_page(GFP_KERNEL);
+			if (!ppud)
+				return 1;
+
+			set_pgd(ppgd, __pgd(_KERNPG_TABLE | __pa(ppud)));
+		}
+
+		if (ident_pud_range(paddr, vaddr, ppgd, vpgd, next))
+			return 1;
+	} while (ppgd++, vpgd++, vaddr = next, vaddr != end);
+
+	return 0;
+}
+#else
+static inline int insert_identity_mapping(resource_size_t paddr,
+					  unsigned long vaddr,
+					  unsigned long size)
+{
+	return 0;
+}
+#endif /* CONFIG_X86_64 */
+
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
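The new helpers walk all four paging levels top-down: each ident_*_range() allocates a zeroed intermediate table when the trampoline side has none, then descends, and ident_pte_range() finally copies the leaf PTEs from the kernel's virtual mapping to the same offsets at the physical (identity) address. The 0x800000000000 test rejects anything at or beyond the 47-bit canonical boundary ("the guard hole"). To make the index arithmetic concrete, here is a small sketch with the 4-level/4K shift values written out; decompose() is an illustrative helper of mine, not a kernel function:

/* Sketch: how an address decomposes into 4-level page-table indices
 * on x86-64 with 4K pages. The shifts match PGDIR_SHIFT/PUD_SHIFT/
 * PMD_SHIFT/PAGE_SHIFT for this configuration.
 */
#include <stdio.h>

static void decompose(unsigned long long addr)
{
	unsigned idx_pgd = (addr >> 39) & 0x1ff; /* PGDIR_SHIFT = 39 */
	unsigned idx_pud = (addr >> 30) & 0x1ff; /* PUD_SHIFT   = 30 */
	unsigned idx_pmd = (addr >> 21) & 0x1ff; /* PMD_SHIFT   = 21 */
	unsigned idx_pte = (addr >> 12) & 0x1ff; /* PAGE_SHIFT  = 12 */

	printf("%#llx -> pgd %u, pud %u, pmd %u, pte %u\n",
	       addr, idx_pgd, idx_pud, idx_pmd, idx_pte);
}

int main(void)
{
	decompose(0xfed00000ULL);     /* an MMIO-ish physical address */
	decompose(0x7fffffffffffULL); /* last byte below the guard hole */
	return 0;
}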
@@ -163,6 +264,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	ret_addr = (void __iomem *) (vaddr + offset);
 	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
 
+	if (insert_identity_mapping(phys_addr, vaddr, size))
+		printk(KERN_WARNING "ioremap: unable to map 0x%llx in identity pagetable\n",
+					(unsigned long long)phys_addr);
+
 	/*
 	 * Check if the request spans more than any BAR in the iomem resource
 	 * tree.
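With this hook, every successful ioremap() additionally attempts a 1:1 mapping of the region in trampoline_pgd, so EFI runtime code executing with that page table loaded can still reach the device; failure only warns, since the ordinary kernel-virtual mapping is unaffected. A hedged caller's-eye sketch (module-style C; the physical address and size are invented for illustration):

/* Sketch (kernel-module style, hypothetical device address): after
 * this commit, the ioremap() below also populates the identity
 * mapping in trampoline_pgd as a side effect; the caller-visible
 * API is unchanged.
 */
#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_PHYS	0xfed00000UL	/* made-up MMIO base */
#define DEMO_SIZE	0x1000UL

static void __iomem *demo_base;

static int demo_map(void)
{
	demo_base = ioremap(DEMO_PHYS, DEMO_SIZE);
	if (!demo_base)
		return -ENOMEM;

	/* Reads go through the normal kernel-virtual mapping; the 1:1
	 * trampoline_pgd entry exists only for EFI's benefit. */
	(void)readl(demo_base);
	return 0;
}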
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -919,11 +919,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 
 	/*
 	 * On success we use clflush, when the CPU supports it to
-	 * avoid the wbindv. If the CPU does not support it and in the
-	 * error case we fall back to cpa_flush_all (which uses
-	 * wbindv):
+	 * avoid the wbindv. If the CPU does not support it, in the
+	 * error case, and during early boot (for EFI) we fall back
+	 * to cpa_flush_all (which uses wbinvd):
 	 */
-	if (!ret && cpu_has_clflush) {
+	if (early_boot_irqs_disabled)
+		__cpa_flush_all((void *)(long)cache);
+	else if (!ret && cpu_has_clflush) {
 		if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
 			cpa_flush_array(addr, numpages, cache,
 					cpa.flags, pages);
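The pageattr.c hunk adds an early-boot branch: before IRQs are enabled, the IPI-driven clflush paths cannot run, so EFI-era callers are routed straight to the wbinvd-based __cpa_flush_all(), and the comment is reworded (fixing one of the two "wbindv" typos) to say so. Restating the resulting decision order as a standalone sketch, with hypothetical stand-in names for the kernel's flush helpers:

/* Sketch of the flush-selection order after this hunk (userspace C;
 * pick_flush() and its output strings are illustrative stand-ins for
 * __cpa_flush_all/cpa_flush_array/cpa_flush_range/cpa_flush_all).
 */
#include <stdbool.h>
#include <stdio.h>

static void pick_flush(bool early_boot_irqs_disabled, int ret,
		       bool cpu_has_clflush, bool array_request)
{
	if (early_boot_irqs_disabled)
		puts("wbinvd via __cpa_flush_all (early boot, no IPIs yet)");
	else if (!ret && cpu_has_clflush)
		puts(array_request ? "clflush via cpa_flush_array"
				   : "clflush via cpa_flush_range");
	else
		puts("wbinvd via cpa_flush_all (error path or no clflush)");
}

int main(void)
{
	pick_flush(true,  0, true,  false); /* EFI call during early boot */
	pick_flush(false, 0, true,  true);  /* normal array flush */
	pick_flush(false, 1, true,  false); /* error: full flush */
	return 0;
}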