KVM: x86: move MTRR related code to a separate file
The MTRR code currently lives in x86.c and mmu.c; move it to a separate file to make the organization clearer. That file will also be the place where vMTRR is fully implemented.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
@@ -2437,109 +2437,6 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
 
-/*
- * The function is based on mtrr_type_lookup() in
- * arch/x86/kernel/cpu/mtrr/generic.c
- */
-static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
-                         u64 start, u64 end)
-{
-        u64 base, mask;
-        u8 prev_match, curr_match;
-        int i, num_var_ranges = KVM_NR_VAR_MTRR;
-
-        /* MTRR is completely disabled, use UC for all of physical memory. */
-        if (!(mtrr_state->enabled & 0x2))
-                return MTRR_TYPE_UNCACHABLE;
-
-        /* Make end inclusive end, instead of exclusive */
-        end--;
-
-        /* Look in fixed ranges. Just return the type as per start */
-        if (mtrr_state->have_fixed && (mtrr_state->enabled & 0x1) &&
-            (start < 0x100000)) {
-                int idx;
-
-                if (start < 0x80000) {
-                        idx = 0;
-                        idx += (start >> 16);
-                        return mtrr_state->fixed_ranges[idx];
-                } else if (start < 0xC0000) {
-                        idx = 1 * 8;
-                        idx += ((start - 0x80000) >> 14);
-                        return mtrr_state->fixed_ranges[idx];
-                } else if (start < 0x1000000) {
-                        idx = 3 * 8;
-                        idx += ((start - 0xC0000) >> 12);
-                        return mtrr_state->fixed_ranges[idx];
-                }
-        }
-
-        /*
-         * Look in variable ranges
-         * Look of multiple ranges matching this address and pick type
-         * as per MTRR precedence
-         */
-        prev_match = 0xFF;
-        for (i = 0; i < num_var_ranges; ++i) {
-                unsigned short start_state, end_state;
-
-                if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
-                        continue;
-
-                base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
-                       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
-                mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
-                       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
-
-                start_state = ((start & mask) == (base & mask));
-                end_state = ((end & mask) == (base & mask));
-                if (start_state != end_state)
-                        return 0xFE;
-
-                if ((start & mask) != (base & mask))
-                        continue;
-
-                curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
-                if (prev_match == 0xFF) {
-                        prev_match = curr_match;
-                        continue;
-                }
-
-                if (prev_match == MTRR_TYPE_UNCACHABLE ||
-                    curr_match == MTRR_TYPE_UNCACHABLE)
-                        return MTRR_TYPE_UNCACHABLE;
-
-                if ((prev_match == MTRR_TYPE_WRBACK &&
-                     curr_match == MTRR_TYPE_WRTHROUGH) ||
-                    (prev_match == MTRR_TYPE_WRTHROUGH &&
-                     curr_match == MTRR_TYPE_WRBACK)) {
-                        prev_match = MTRR_TYPE_WRTHROUGH;
-                        curr_match = MTRR_TYPE_WRTHROUGH;
-                }
-
-                if (prev_match != curr_match)
-                        return MTRR_TYPE_UNCACHABLE;
-        }
-
-        if (prev_match != 0xFF)
-                return prev_match;
-
-        return mtrr_state->def_type;
-}
-
-u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
-        u8 mtrr;
-
-        mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
-                             (gfn << PAGE_SHIFT) + PAGE_SIZE);
-        if (mtrr == 0xfe || mtrr == 0xff)
-                mtrr = MTRR_TYPE_WRBACK;
-        return mtrr;
-}
-EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
-
 static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
         trace_kvm_mmu_unsync_page(sp);
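For readers unfamiliar with the fixed-range MTRR layout, here is a minimal standalone sketch (not part of the patch; the helper name fixed_mtrr_index and the test program are made up for illustration) of the same index arithmetic get_mtrr_type() applies to addresses below 1 MiB: 8 ranges of 64 KiB, then 16 ranges of 16 KiB, then 64 ranges of 4 KiB.

        /* Illustrative only -- mirrors the fixed-range indexing in get_mtrr_type(). */
        #include <stdint.h>
        #include <stdio.h>

        static int fixed_mtrr_index(uint64_t addr)
        {
                if (addr < 0x80000)                     /* 8 ranges of 64 KiB  */
                        return addr >> 16;
                if (addr < 0xC0000)                     /* 16 ranges of 16 KiB */
                        return 8 + ((addr - 0x80000) >> 14);
                if (addr < 0x100000)                    /* 64 ranges of 4 KiB  */
                        return 24 + ((addr - 0xC0000) >> 12);
                return -1;                              /* not a fixed-range address */
        }

        int main(void)
        {
                /* 0xB8000 (legacy VGA text buffer) lands on index 22 */
                printf("%d\n", fixed_mtrr_index(0xB8000));
                return 0;
        }

The printed index 22 corresponds to fixed_ranges[22], the slot of the MTRR_FIX16K_A0000 register that covers 0xB8000-0xBBFFF.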