KVM: Terminate memslot walks via used_slots
Refactor memslot handling to treat the number of used slots as the de facto
size of the memslot array, e.g. return NULL from id_to_memslot() when an
invalid index is provided instead of relying on npages==0 to detect an
invalid memslot.  Rework the sorting and walking of memslots in advance of
dynamically sizing memslots to aid bisection and debug, e.g. with luck, a
bug in the refactoring will bisect here and/or hit a WARN instead of
randomly corrupting memory.

Alternatively, a global null/invalid memslot could be returned, i.e. so
callers of id_to_memslot() don't have to explicitly check for a NULL
memslot, but that approach runs the risk of introducing difficult-to-debug
issues, e.g. if the global null slot is modified.  Constifying the return
from id_to_memslot() to combat such issues is possible, but would require
a massive refactoring of arch specific code and would still be susceptible
to casting shenanigans.

Add function comments to update_memslots() and search_memslots() to
explicitly (and loudly) state how memslots are sorted.

Opportunistically stuff @hva with a non-canonical value when deleting a
private memslot on x86 to detect bogus usage of the freed slot.

No functional change intended.

Tested-by: Christoffer Dall <christoffer.dall@arm.com>
Tested-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
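For illustration, a minimal sketch of the "@hva stuffing" mentioned above;
the x86 hunk itself is not part of this excerpt, so the helper name and the
exact constant below are assumptions, not lines from the commit:

/*
 * Hypothetical sketch, not a hunk from this commit: poison the host
 * virtual address of a deleted private memslot with a non-canonical
 * x86-64 value so any bogus use of the freed slot faults loudly
 * instead of silently reusing stale state.
 */
static inline unsigned long poison_deleted_slot_hva(void)
{
	/* Bits 63:48 are not a sign extension of bit 47 => non-canonical. */
	return (unsigned long)(0xdeadull << 48);
}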
committed by Paolo Bonzini
parent 2a49f61dfc
commit 0577d1abe7
@@ -572,10 +572,11 @@ static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
 	return vcpu->vcpu_idx;
 }
 
-#define kvm_for_each_memslot(memslot, slots) \
-	for (memslot = &slots->memslots[0]; \
-	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
-	     memslot++)
+#define kvm_for_each_memslot(memslot, slots)				\
+	for (memslot = &slots->memslots[0];				\
+	     memslot < slots->memslots + slots->used_slots; memslot++)	\
+		if (WARN_ON_ONCE(!memslot->npages)) {			\
+		} else
 
 void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);
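A hedged usage sketch of the reworked iterator (the walker function below is
hypothetical): termination is now governed by used_slots rather than by
hitting an npages==0 sentinel, and the empty if (WARN_ON_ONCE(...)) { } else
arm keeps the macro safe to follow with an unbraced statement while still
flagging a used slot whose npages is somehow zero.

/* Hypothetical walker built on the reworked macro. */
static unsigned long example_total_pages(struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;
	unsigned long total = 0;

	kvm_for_each_memslot(memslot, slots)
		total += memslot->npages;

	return total;
}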
@@ -635,12 +636,15 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
 	return __kvm_memslots(vcpu->kvm, as_id);
 }
 
-static inline struct kvm_memory_slot *
-id_to_memslot(struct kvm_memslots *slots, int id)
+static inline
+struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
 {
 	int index = slots->id_to_index[id];
 	struct kvm_memory_slot *slot;
 
+	if (index < 0)
+		return NULL;
+
 	slot = &slots->memslots[index];
 
 	WARN_ON(slot->id != id);
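With the early return added above, callers can no longer assume a valid
pointer comes back for any id. A hypothetical caller showing the new
contract (the function and its error codes are assumptions for
illustration):

/*
 * Hypothetical caller: an invalid id now yields NULL rather than a
 * slot that must be inspected for npages == 0.
 */
static int example_slot_npages(struct kvm_memslots *slots, int id,
			       unsigned long *npages)
{
	struct kvm_memory_slot *slot = id_to_memslot(slots, id);

	if (!slot)
		return -ENOENT;

	*npages = slot->npages;
	return 0;
}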
@@ -1012,6 +1016,8 @@ bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
  * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
  * gfn_to_memslot() itself isn't here as an inline because that would
  * bloat other code too much.
+ *
+ * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
  */
 static inline struct kvm_memory_slot *
 search_memslots(struct kvm_memslots *slots, gfn_t gfn)
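The new comment documents the invariant the lookup relies on. A simplified
sketch (not the verbatim function body, and without the lru_slot fast path)
of a binary search over slots sorted from highest base_gfn to lowest:

/*
 * Simplified sketch: find the leftmost slot whose base_gfn is <= gfn
 * (the array is sorted in descending base_gfn order), then verify gfn
 * actually falls inside that slot.
 */
static inline struct kvm_memory_slot *
example_search(struct kvm_memslots *slots, gfn_t gfn)
{
	struct kvm_memory_slot *memslots = slots->memslots;
	int start = 0, end = slots->used_slots;
	int mid;

	while (start < end) {
		mid = start + (end - start) / 2;

		if (gfn >= memslots[mid].base_gfn)
			end = mid;	/* candidate; keep searching left */
		else
			start = mid + 1;
	}

	if (start < slots->used_slots &&
	    gfn < memslots[start].base_gfn + memslots[start].npages)
		return &memslots[start];

	return NULL;
}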