powerpc/64s/radix: Fix huge vmap false positive
commit 467ba14e1660b52a2f9338b484704c461bd23019 upstream.

pmd_huge() is defined to false when HUGETLB_PAGE is not configured, but the
vmap code still installs huge PMDs. This leads to false bad PMD errors when
vunmapping because the entry is not seen as a huge PTE, and the bad PMD check
catches it. The end result may not be much more serious than some bad pmd
warning messages, because pmd_none_or_clear_bad() does what we wanted and
clears the huge PTE anyway.

Fix this by checking pmd_is_leaf(), which checks for a PTE regardless of
config options. The whole huge/large/leaf stuff is a tangled mess but that's
kernel-wide and not something we can improve much in arch/powerpc code.

pmd_page(), pud_page(), etc., called by vmalloc_to_page() on huge vmaps can
similarly trigger a false VM_BUG_ON when CONFIG_HUGETLB_PAGE=n, so those
checks are adjusted. The checks were added by commit d6eacedd1f
("powerpc/book3s: Use config independent helpers for page table walk"),
while implementing a similar fix for other page table walking functions.

Fixes: d909f9109c ("powerpc/64s/radix: Enable HAVE_ARCH_HUGE_VMAP")
Cc: stable@vger.kernel.org # v5.3+
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211216103342.609192-1-npiggin@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
committed by: Greg Kroah-Hartman
parent: eb44b1386a
commit: e09f47e77b
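A quick stand-alone model of the failure mode described in the commit message
may help: with CONFIG_HUGETLB_PAGE unset, pmd_huge() is compiled to false, so
a clear-huge path keyed on it never recognises the huge PMD that the vmap code
installed, and the stale entry later trips the bad-PMD check on vunmap. The
sketch below is a user-space toy, not kernel code; the pmd_t layout, the leaf
bit, and the helper bodies are invented for illustration, and only the names
mirror the kernel's.

/*
 * Toy user-space model of the pmd_huge() vs pmd_is_leaf() difference.
 * The bit layout and helper bodies are made up for illustration; only
 * the names follow the kernel's.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PMD_LEAF 0x1ULL                 /* pretend "maps a huge page" bit */

typedef struct { uint64_t val; } pmd_t;

/* With HUGETLB_PAGE unset, the commit message notes pmd_huge() is false. */
static bool pmd_huge(pmd_t pmd) { (void)pmd; return false; }

/* Config-independent leaf check, in the spirit of pmd_is_leaf(). */
static bool pmd_is_leaf(pmd_t pmd) { return pmd.val & PMD_LEAF; }

/* Old logic: keyed on pmd_huge(), so a huge vmap PMD is never cleared
 * here and is later caught by the bad-PMD check during vunmap. */
static int pmd_clear_huge_old(pmd_t *pmd)
{
        if (pmd_huge(*pmd)) { pmd->val = 0; return 1; }
        return 0;
}

/* Fixed logic: keyed on the leaf check, independent of config options. */
static int pmd_clear_huge_new(pmd_t *pmd)
{
        if (pmd_is_leaf(*pmd)) { pmd->val = 0; return 1; }
        return 0;
}

int main(void)
{
        pmd_t a = { PMD_LEAF }, b = { PMD_LEAF };   /* two huge vmap entries */

        printf("old check cleared entry: %d\n", pmd_clear_huge_old(&a)); /* 0 */
        printf("new check cleared entry: %d\n", pmd_clear_huge_new(&b)); /* 1 */
        return 0;
}

The actual fix in the diff below is exactly that substitution in
pud_clear_huge() and pmd_clear_huge(), plus loosening the VM_WARN_ON checks
that vmalloc_to_page() can reach through pmd_page() and friends.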
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1152,7 +1152,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
 
 int pud_clear_huge(pud_t *pud)
 {
-	if (pud_huge(*pud)) {
+	if (pud_is_leaf(*pud)) {
 		pud_clear(pud);
 		return 1;
 	}
@@ -1199,7 +1199,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
 
 int pmd_clear_huge(pmd_t *pmd)
 {
-	if (pmd_huge(*pmd)) {
+	if (pmd_is_leaf(*pmd)) {
 		pmd_clear(pmd);
 		return 1;
 	}
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -102,6 +102,7 @@ EXPORT_SYMBOL(__pte_frag_size_shift);
 struct page *p4d_page(p4d_t p4d)
 {
 	if (p4d_is_leaf(p4d)) {
-		VM_WARN_ON(!p4d_huge(p4d));
+		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+			VM_WARN_ON(!p4d_huge(p4d));
 		return pte_page(p4d_pte(p4d));
 	}
@@ -112,6 +113,7 @@ struct page *p4d_page(p4d_t p4d)
 struct page *pud_page(pud_t pud)
 {
 	if (pud_is_leaf(pud)) {
-		VM_WARN_ON(!pud_huge(pud));
+		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+			VM_WARN_ON(!pud_huge(pud));
 		return pte_page(pud_pte(pud));
 	}
@@ -125,6 +127,12 @@ struct page *pud_page(pud_t pud)
 struct page *pmd_page(pmd_t pmd)
 {
 	if (pmd_is_leaf(pmd)) {
-		VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
+		/*
+		 * vmalloc_to_page may be called on any vmap address (not only
+		 * vmalloc), and it uses pmd_page() etc., when huge vmap is
+		 * enabled so these checks can't be used.
+		 */
+		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+			VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
 		return pte_page(pmd_pte(pmd));
 	}
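The pgtable_64.c hunks rely on IS_ENABLED() evaluating to a compile-time 0 or
1 for the config symbol, so with CONFIG_HAVE_ARCH_HUGE_VMAP=y the guarded
VM_WARN_ON becomes statically dead code and can no longer fire on huge vmap
entries that are not hugetlb pages. A rough stand-alone sketch of that pattern
follows; the macro names are simulated stand-ins, not the kernel's
definitions.

/* Simulated "if (!IS_ENABLED(CONFIG_x)) WARN(...)" pattern; the macro
 * names below are stand-ins, not the kernel's implementation. */
#include <stdio.h>

#define SIM_CONFIG_HAVE_ARCH_HUGE_VMAP 1    /* pretend the option is =y */
#define SIM_IS_ENABLED(opt) (opt)           /* kernel's IS_ENABLED() is more involved */
#define SIM_VM_WARN_ON(cond) \
        do { if (cond) fprintf(stderr, "warn: %s\n", #cond); } while (0)

int main(void)
{
        int is_hugetlb_entry = 0;   /* a huge vmap entry, not a hugetlb one */

        /* With the option enabled the branch is statically false, so the
         * warning is never evaluated for huge vmap mappings. */
        if (!SIM_IS_ENABLED(SIM_CONFIG_HAVE_ARCH_HUGE_VMAP))
                SIM_VM_WARN_ON(!is_hugetlb_entry);

        puts("no false warning");
        return 0;
}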