powerpc/64s: Remove POWER9 DD1 support

POWER9 DD1 was never a product. It is no longer supported by upstream
firmware, and it is not effectively supported in Linux due to lack of
testing.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Michael Ellerman <mpe@ellerman.id.au>
[mpe: Remove arch_make_huge_pte() entirely]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Author:     Nicholas Piggin
AuthorDate: 2018-07-05 18:47:00 +10:00
Committer:  Michael Ellerman
Parent:     ce397d215c
Commit:     2bf1071a8d

32 changed files with 65 additions and 532 deletions


@@ -66,10 +66,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
         bits = root & RPDS_MASK;
         root = root & RPDB_MASK;
 
-        /* P9 DD1 interprets RTS (radix tree size) differently */
         offset = rts + 31;
-        if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-                offset -= 3;
 
         /* current implementations only support 52-bit space */
         if (offset != 52)
@@ -160,17 +157,7 @@ static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
                               unsigned long clr, unsigned long set,
                               unsigned long addr, unsigned int shift)
 {
-        unsigned long old = 0;
-
-        if (!(clr & _PAGE_PRESENT) && cpu_has_feature(CPU_FTR_POWER9_DD1) &&
-            pte_present(*ptep)) {
-                /* have to invalidate it first */
-                old = __radix_pte_update(ptep, _PAGE_PRESENT, 0);
-                kvmppc_radix_tlbie_page(kvm, addr, shift);
-                set |= _PAGE_PRESENT;
-                old &= _PAGE_PRESENT;
-        }
-        return __radix_pte_update(ptep, clr, set) | old;
+        return __radix_pte_update(ptep, clr, set);
 }
 
 void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
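Aside (not part of the patch): the RTS field decoded above gives the guest's
radix address-space width as rts + 31 bits, and the xlate code only accepts a
52-bit space, i.e. rts == 21; the deleted DD1 branch made the same field decode
3 bits smaller. A minimal standalone sketch of that decoding, using a
hypothetical helper name:

#include <stdint.h>

/* Illustration only: mirrors "offset = rts + 31" above; the dd1 flag
 * models the special case removed by this patch. */
static inline unsigned int radix_space_bits(uint64_t rts, int dd1)
{
        unsigned int offset = rts + 31;

        if (dd1)
                offset -= 3;    /* POWER9 DD1 decoded RTS 3 bits smaller */
        return offset;          /* only offset == 52 (rts == 21) is accepted */
}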


@@ -1693,14 +1693,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
                 break;
         case KVM_REG_PPC_TB_OFFSET:
-                /*
-                 * POWER9 DD1 has an erratum where writing TBU40 causes
-                 * the timebase to lose ticks. So we don't let the
-                 * timebase offset be changed on P9 DD1. (It is
-                 * initialized to zero.)
-                 */
-                if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-                        break;
                 /* round up to multiple of 2^24 */
                 vcpu->arch.vcore->tb_offset =
                         ALIGN(set_reg_val(id, *val), 1UL << 24);
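Aside (not part of the patch): the offset is kept a multiple of 2^24 because
the hypervisor applies it by writing TBU40, which can only set the upper 40
bits of the 64-bit timebase, so the low 24 bits of an offset cannot be
represented. A small standalone sketch of the rounding ALIGN() performs here:

#include <stdint.h>

/* Illustration only: round val up to the next multiple of 2^24, as
 * ALIGN(set_reg_val(id, *val), 1UL << 24) does in the code above. */
static inline uint64_t round_tb_offset(uint64_t val)
{
        return (val + (1ULL << 24) - 1) & ~((1ULL << 24) - 1);
}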
@@ -2026,8 +2018,6 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
         /*
          * Set the default HFSCR for the guest from the host value.
          * This value is only used on POWER9.
-         * On POWER9 DD1, TM doesn't work, so we make sure to
-         * prevent the guest from using it.
          * On POWER9, we want to virtualize the doorbell facility, so we
          * turn off the HFSCR bit, which causes those instructions to trap.
          */


@@ -916,9 +916,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
         mtspr   SPRN_BESCR, r6
         mtspr   SPRN_PID, r7
         mtspr   SPRN_WORT, r8
-BEGIN_FTR_SECTION
-        PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
 BEGIN_FTR_SECTION
         /* POWER8-only registers */
         ld      r5, VCPU_TCSCR(r4)
@@ -1912,7 +1909,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
         ld      r5, VCPU_KVM(r9)
         lbz     r0, KVM_RADIX(r5)
         cmpwi   cr2, r0, 0
-        beq     cr2, 4f
+        beq     cr2, 2f
 
         /*
          * Radix: do eieio; tlbsync; ptesync sequence in case we
@@ -1952,11 +1949,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
         bdnz    1b
         ptesync
 
-2:      /* Flush the ERAT on radix P9 DD1 guest exit */
-BEGIN_FTR_SECTION
-        PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-4:
+2:
 #endif /* CONFIG_PPC_RADIX_MMU */
 
         /*
@@ -3367,11 +3360,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
         mtspr   SPRN_CIABR, r0
         mtspr   SPRN_DAWRX, r0
 
-        /* Flush the ERAT on radix P9 DD1 guest exit */
-BEGIN_FTR_SECTION
-        PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-
 BEGIN_MMU_FTR_SECTION
         b       4f
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)


@@ -25,18 +25,6 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
          */
         eieio();
 
-        /*
-         * DD1 bug workaround: If PIPR is less favored than CPPR
-         * ignore the interrupt or we might incorrectly lose an IPB
-         * bit.
-         */
-        if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-                __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
-                u8 pipr = be64_to_cpu(qw1) & 0xff;
-                if (pipr >= xc->hw_cppr)
-                        return;
-        }
-
         /* Perform the acknowledge OS to register cycle. */
         ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
@@ -89,8 +77,15 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
         /* If the XIVE supports the new "store EOI facility, use it */
         if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
                 __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
-        else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
+        else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW)
                 opal_int_eoi(hw_irq);
+        else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
+                /*
+                 * For LSIs the HW EOI cycle is used rather than PQ bits,
+                 * as they are automatically re-triggered in HW when still
+                 * pending.
+                 */
+                __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
         } else {
                 uint64_t eoi_val;
@@ -102,20 +97,12 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
                  *
                  * This allows us to then do a re-trigger if Q was set
                  * rather than synthetizing an interrupt in software
-                 *
-                 * For LSIs, using the HW EOI cycle works around a problem
-                 * on P9 DD1 PHBs where the other ESB accesses don't work
-                 * properly.
                  */
-                if (xd->flags & XIVE_IRQ_FLAG_LSI)
-                        __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
-                else {
-                        eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
+                eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
 
-                        /* Re-trigger if needed */
-                        if ((eoi_val & 1) && __x_trig_page(xd))
-                                __x_writeq(0, __x_trig_page(xd));
-                }
+                /* Re-trigger if needed */
+                if ((eoi_val & 1) && __x_trig_page(xd))
+                        __x_writeq(0, __x_trig_page(xd));
         }
 }