author     Sean Christopherson <seanjc@google.com>   2022-06-15 02:33:25 +0300
committer  Paolo Bonzini <pbonzini@redhat.com>        2022-06-20 13:21:28 +0300
commit     2ca3129e8045b18eb15431b485d40c028fe8fb00 (patch)
tree       00c0d3b178f963a7d4445d9e1c5465d194a5f8d7 /arch/x86/kvm/mmu/tdp_iter.c
parent     42c88ff893f06b6ab4eaeb2c37e513edf8c1943b (diff)
download   linux-2ca3129e8045b18eb15431b485d40c028fe8fb00.tar.xz
KVM: x86/mmu: Use separate namespaces for guest PTEs and shadow PTEs
Separate the macros for KVM's shadow PTEs (SPTE) from guest 64-bit PTEs
(PT64).  SPTE and PT64 are _mostly_ the same, but the few differences are
quite critical, e.g. *_BASE_ADDR_MASK must differentiate between host and
guest physical address spaces, and SPTE_PERM_MASK (was PT64_PERM_MASK) is
very much specific to SPTEs.

Opportunistically (and temporarily) move most guest macros into paging.h
to clearly associate them with shadow paging, and to ensure that they're
not used as of this commit.  A future patch will eliminate them entirely.

Sadly, PT32_LEVEL_BITS is left behind in mmu_internal.h because it's
needed for the quadrant calculation in kvm_mmu_get_page().  The quadrant
calculation is hot enough (when using shadow paging with 32-bit guests)
that adding a per-context helper is undesirable, and burying the
computation in paging_tmpl.h with a forward declaration isn't exactly an
improvement.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220614233328.3896033-6-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
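For readers skimming the rename, here is a minimal, self-contained sketch of the table-index arithmetic that SPTE_INDEX-style macros encode for x86-64 paging (9 index bits per level, 512 entries per 4 KiB table). These are illustrative definitions written for this note, not the ones in arch/x86/kvm/mmu/spte.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: simplified stand-ins for the kernel's SPTE_* macros. */
#define PAGE_SHIFT              12
#define SPTE_LEVEL_BITS         9                       /* 512 entries per table */
#define SPTE_ENT_PER_PAGE       (1ULL << SPTE_LEVEL_BITS)

/* Shift for a given paging level: 12, 21, 30, 39 for levels 1..4. */
#define SPTE_LEVEL_SHIFT(level) (PAGE_SHIFT + ((level) - 1) * SPTE_LEVEL_BITS)

/* Index of the entry that maps 'addr' within the page table at 'level'. */
#define SPTE_INDEX(addr, level) \
        (((addr) >> SPTE_LEVEL_SHIFT(level)) & (SPTE_ENT_PER_PAGE - 1))

int main(void)
{
        uint64_t addr = 0x7f1234567000ULL;
        int level;

        for (level = 1; level <= 4; level++)
                printf("level %d: index %llu\n", level,
                       (unsigned long long)SPTE_INDEX(addr, level));
        return 0;
}

With definitions of this shape, each level simply selects a different 9-bit slice of the address, which is why the TDP iterator below can recompute its SPTE pointer from nothing more than the gfn and the level.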
Diffstat (limited to 'arch/x86/kvm/mmu/tdp_iter.c')
-rw-r--r--  arch/x86/kvm/mmu/tdp_iter.c  6
1 file changed, 3 insertions, 3 deletions
diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
index ee4802d7b36c..9c65a64a56d9 100644
--- a/arch/x86/kvm/mmu/tdp_iter.c
+++ b/arch/x86/kvm/mmu/tdp_iter.c
@@ -11,7 +11,7 @@
 static void tdp_iter_refresh_sptep(struct tdp_iter *iter)
 {
         iter->sptep = iter->pt_path[iter->level - 1] +
-                SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level);
+                SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level);
         iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep);
 }
 
@@ -116,8 +116,8 @@ static bool try_step_side(struct tdp_iter *iter)
          * Check if the iterator is already at the end of the current page
          * table.
          */
-        if (SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level) ==
-            (PT64_ENT_PER_PAGE - 1))
+        if (SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level) ==
+            (SPTE_ENT_PER_PAGE - 1))
                 return false;
 
         iter->gfn += KVM_PAGES_PER_HPAGE(iter->level);
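To make the second hunk concrete, here is a hedged, standalone sketch of the end-of-table test that try_step_side() spells with SPTE_INDEX/SPTE_ENT_PER_PAGE, plus the "pages covered per entry" step that follows it. The helper names (spte_index, pages_per_entry, can_step_side) are invented for this note and only mirror the kernel logic:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative constants for x86-64 4-level paging; not kernel definitions. */
enum { PAGE_SHIFT = 12, LEVEL_BITS = 9, ENT_PER_PAGE = 1 << LEVEL_BITS };

/* Index of the entry mapping 'gfn' within the page table at 'level' (1..4). */
static uint64_t spte_index(uint64_t gfn, int level)
{
        return ((gfn << PAGE_SHIFT) >> (PAGE_SHIFT + (level - 1) * LEVEL_BITS)) &
               (ENT_PER_PAGE - 1);
}

/* Pages spanned by one entry at 'level': 1, 512, 262144, ... */
static uint64_t pages_per_entry(int level)
{
        return 1ULL << ((level - 1) * LEVEL_BITS);
}

/*
 * Mirror of the check in try_step_side(): if the iterator already sits at
 * the last entry of the current table, a sideways step would walk off the
 * end of that table, so the real iterator returns false and steps up a
 * level instead.
 */
static bool can_step_side(uint64_t gfn, int level)
{
        return spte_index(gfn, level) != (uint64_t)(ENT_PER_PAGE - 1);
}

int main(void)
{
        uint64_t gfn = 0x1ff;   /* last entry of a level-1 table */

        printf("can_step_side(level 1): %d\n", can_step_side(gfn, 1));  /* 0 */
        printf("gfn after a level-2 side step: %#llx\n",
               (unsigned long long)(gfn + pages_per_entry(2)));
        return 0;
}

The gfn advance mirrors KVM_PAGES_PER_HPAGE(level): one entry at level N covers 512^(N-1) 4 KiB pages, so stepping sideways adds that many pages to the gfn.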