Merge branch 'fixes' into next

Merge our fixes branch into next. This brings in a number of commits
that fix bugs we don't want to hit in next, in particular the fix for
CVE-2019-12817.
Michael Ellerman
2019-07-01 14:04:39 +10:00
21 changed files with 230 additions and 24 deletions

View File

@@ -55,20 +55,52 @@ EXPORT_SYMBOL_GPL(hash__alloc_context_id);
 
 void slb_setup_new_exec(void);
 
+static int realloc_context_ids(mm_context_t *ctx)
+{
+	int i, id;
+
+	/*
+	 * id 0 (aka. ctx->id) is special, we always allocate a new one, even if
+	 * there wasn't one allocated previously (which happens in the exec
+	 * case where ctx is newly allocated).
+	 *
+	 * We have to be a bit careful here. We must keep the existing ids in
+	 * the array, so that we can test if they're non-zero to decide if we
+	 * need to allocate a new one. However in case of error we must free the
+	 * ids we've allocated but *not* any of the existing ones (or risk a
+	 * UAF). That's why we decrement i at the start of the error handling
+	 * loop, to skip the id that we just tested but couldn't reallocate.
+	 */
+	for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
+		if (i == 0 || ctx->extended_id[i]) {
+			id = hash__alloc_context_id();
+			if (id < 0)
+				goto error;
+
+			ctx->extended_id[i] = id;
+		}
+	}
+
+	/* The caller expects us to return id */
+	return ctx->id;
+
+error:
+	for (i--; i >= 0; i--) {
+		if (ctx->extended_id[i])
+			ida_free(&mmu_context_ida, ctx->extended_id[i]);
+	}
+
+	return id;
+}
+
 static int hash__init_new_context(struct mm_struct *mm)
 {
 	int index;
 
-	index = hash__alloc_context_id();
-	if (index < 0)
-		return index;
-
 	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
 					   GFP_KERNEL);
-	if (!mm->context.hash_context) {
-		ida_free(&mmu_context_ida, index);
+	if (!mm->context.hash_context)
 		return -ENOMEM;
-	}
 
 	/*
 	 * The old code would re-promote on fork, we don't do that when using
@@ -96,13 +128,20 @@ static int hash__init_new_context(struct mm_struct *mm)
 			mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
 								GFP_KERNEL);
 			if (!mm->context.hash_context->spt) {
-				ida_free(&mmu_context_ida, index);
 				kfree(mm->context.hash_context);
 				return -ENOMEM;
 			}
 		}
 
 #endif
 	}
 
+	index = realloc_context_ids(&mm->context);
+	if (index < 0) {
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+		kfree(mm->context.hash_context->spt);
+#endif
+		kfree(mm->context.hash_context);
+		return index;
+	}
+
 	pkey_mm_init(mm);
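
The unwind pattern in realloc_context_ids() is worth seeing in isolation. Below is a minimal userspace sketch of the same idea; alloc_id(), free_id(), realloc_ids() and the array contents are invented stand-ins for hash__alloc_context_id(), ida_free() and ctx->extended_id, not kernel code:

#include <stdio.h>

#define NR_IDS 8

/* Hypothetical allocator that runs out after three ids, to force the
 * error path. Stands in for hash__alloc_context_id(). */
static int next_id = 1;
static int alloc_id(void)
{
	return next_id <= 3 ? next_id++ : -1;
}

/* Stands in for ida_free(). */
static void free_id(int id)
{
	printf("freed id %d\n", id);
}

/* Reallocate slot 0 unconditionally, and every other slot that already
 * holds an id. On failure, free only the ids *this call* allocated. */
static int realloc_ids(int *ids, int n)
{
	int i, id;

	for (i = 0; i < n; i++) {
		if (i == 0 || ids[i]) {
			id = alloc_id();
			if (id < 0)
				goto error;
			ids[i] = id;
		}
	}
	return ids[0];

error:
	/*
	 * ids[i] still holds a pre-existing id that we failed to replace;
	 * decrementing first skips it, so we never free an id we did not
	 * allocate -- the use-after-free the kernel comment warns about.
	 */
	for (i--; i >= 0; i--) {
		if (ids[i])
			free_id(ids[i]);
	}
	return id;
}

int main(void)
{
	/* Slots 0, 2, 4 and 6 hold "inherited" ids, as after a fork. */
	int ids[NR_IDS] = { 90, 0, 91, 0, 92, 0, 93, 0 };

	/* The fourth allocation fails at slot 6: ids 1-3 are freed, and
	 * the inherited id 93 in slot 6 is left alone. */
	printf("returned %d\n", realloc_ids(ids, NR_IDS));
	return 0;
}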

View File

@@ -116,6 +116,9 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 	/*
 	 * This ensures that generic code that rely on IRQ disabling
 	 * to prevent a parallel THP split work as expected.
+	 *
+	 * Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires
+	 * a special case check in pmd_access_permitted.
 	 */
 	serialize_against_pte_lookup(vma->vm_mm);
 	return __pmd(old_pmd);
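
The new comment is about a three-way distinction that lockless lookups have to make: valid, empty, and invalidated-but-in-flux. A compressed illustration of the encoding, with invented bit values and helper names (the real _PAGE_* definitions and pmd_access_permitted() live in the book3s64 headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PG_PRESENT 0x1u	/* invented bit layout, for illustration only */
#define PG_INVALID 0x2u

/* In flux: a pmdp_invalidate()-style transition set INVALID and cleared
 * PRESENT, so the entry is neither valid nor simply empty. */
static bool sketch_pmd_is_serializing(uint64_t pmd)
{
	return (pmd & PG_INVALID) && !(pmd & PG_PRESENT);
}

/* Lockless readers must treat the in-flux state as "no access"; this is
 * the special case the comment above refers to. */
static bool sketch_pmd_access_permitted(uint64_t pmd)
{
	if (sketch_pmd_is_serializing(pmd))
		return false;
	return pmd & PG_PRESENT;
}

int main(void)
{
	printf("empty:   %d\n", sketch_pmd_access_permitted(0));
	printf("valid:   %d\n", sketch_pmd_access_permitted(PG_PRESENT));
	printf("in flux: %d\n", sketch_pmd_access_permitted(PG_INVALID));
	return 0;
}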

View File

@@ -253,7 +253,8 @@ void __init paging_init(void)
 	       (long int)((top_of_ram - total_ram) >> 20));
 
 #ifdef CONFIG_ZONE_DMA
-	max_zone_pfns[ZONE_DMA]	= min(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT);
+	max_zone_pfns[ZONE_DMA]	= min(max_low_pfn,
+				      ((1UL << ARCH_ZONE_DMA_BITS) - 1) >> PAGE_SHIFT);
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
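
The effect of the new expression is easiest to see with concrete numbers. A small sketch, assuming 4K pages (PAGE_SHIFT == 12): the 31-bit case reproduces the old hard-coded 0x7fffffffUL, while a 30-bit ARCH_ZONE_DMA_BITS would cap the zone at the 1 GiB boundary instead:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4K pages, for illustration */

/* Highest page frame number reachable by a DMA mask of 'bits' bits. */
static unsigned long dma_max_pfn(unsigned int bits)
{
	return ((1UL << bits) - 1) >> PAGE_SHIFT;
}

int main(void)
{
	/* 31 bits: pfn 0x7ffff, the 2 GiB boundary (the old constant). */
	printf("31-bit: 0x%lx\n", dma_max_pfn(31));
	/* 30 bits: pfn 0x3ffff, the 1 GiB boundary. */
	printf("30-bit: 0x%lx\n", dma_max_pfn(30));
	return 0;
}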

View File

@@ -372,13 +372,25 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 	pdshift = PMD_SHIFT;
 	pmdp = pmd_offset(&pud, ea);
 	pmd = READ_ONCE(*pmdp);
+
 	/*
-	 * A hugepage collapse is captured by pmd_none, because
-	 * it mark the pmd none and do a hpte invalidate.
+	 * A hugepage collapse is captured by this condition, see
+	 * pmdp_collapse_flush.
 	 */
 	if (pmd_none(pmd))
 		return NULL;
 
+#ifdef CONFIG_PPC_BOOK3S_64
+	/*
+	 * A hugepage split is captured by this condition, see
+	 * pmdp_invalidate.
+	 *
+	 * Huge page modification can be caught here too.
+	 */
+	if (pmd_is_serializing(pmd))
+		return NULL;
+#endif
+
 	if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
 		if (is_thp)
 			*is_thp = true;
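
Putting the two checks together, the lockless walk boils down to: snapshot the entry once, then rule out "gone" and "in flux" before trusting it. A condensed sketch with invented flag bits and names (the real walker also handles hugepd directories, devmap entries and the deeper levels):

#include <stdio.h>
#include <stdint.h>

#define PG_PRESENT 0x1u	/* invented bits, for illustration only */
#define PG_INVALID 0x2u
#define PG_HUGE    0x4u

enum walk { WALK_NONE, WALK_BAIL, WALK_HUGE, WALK_TABLE };

static enum walk classify_pmd(const volatile uint64_t *pmdp)
{
	uint64_t pmd = *pmdp;	/* one snapshot, as with READ_ONCE() */

	if (pmd == 0)
		return WALK_NONE;	/* collapse cleared the entry */
	if ((pmd & PG_INVALID) && !(pmd & PG_PRESENT))
		return WALK_BAIL;	/* split/invalidate in progress */
	if (pmd & PG_HUGE)
		return WALK_HUGE;	/* leaf entry mapping a huge page */
	return WALK_TABLE;		/* points at a pte table: descend */
}

int main(void)
{
	uint64_t entries[] = { 0, PG_INVALID, PG_PRESENT | PG_HUGE,
			       PG_PRESENT };
	for (unsigned int i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		printf("entry %u -> %d\n", i, classify_pmd(&entries[i]));
	return 0;
}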