Diffstat (limited to 'arch/x86/include/asm/kvm_host.h')
-rw-r--r--  arch/x86/include/asm/kvm_host.h  11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a565a2e70f30..414691169a04 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1407,9 +1407,8 @@ struct kvm_arch {
* the MMU lock in read mode + RCU or
* the MMU lock in write mode
*
- * For writes, this list is protected by:
- * the MMU lock in read mode + the tdp_mmu_pages_lock or
- * the MMU lock in write mode
+ * For writes, this list is protected by tdp_mmu_pages_lock; see
+ * below for the details.
*
* Roots will remain in the list until their tdp_mmu_root_count
* drops to zero, at which point the thread that decremented the
@@ -1426,8 +1425,10 @@ struct kvm_arch {
* - possible_nx_huge_pages;
* - the possible_nx_huge_page_link field of kvm_mmu_page structs used
* by the TDP MMU
- * It is acceptable, but not necessary, to acquire this lock when
- * the thread holds the MMU lock in write mode.
+ * Because the lock is only taken within the MMU lock, strictly
+ * speaking it is redundant to acquire this lock when the thread
+ * holds the MMU lock in write mode. However it often simplifies
+ * the code to do so.
*/
spinlock_t tdp_mmu_pages_lock;
#endif /* CONFIG_X86_64 */
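
The updated comments describe a two-level locking scheme: readers walk the TDP MMU roots list under the MMU lock in read mode plus RCU (or under the MMU lock in write mode), while writers additionally take tdp_mmu_pages_lock. The sketch below is a minimal, non-authoritative illustration of that rule. The field names (kvm->mmu_lock, kvm->arch.tdp_mmu_roots, kvm->arch.tdp_mmu_pages_lock, the link member of struct kvm_mmu_page) and the lockdep assertion helpers follow the kernel's TDP MMU code, but the two functions themselves are hypothetical and omit the real implementation's reference counting and error handling.

/*
 * Hypothetical write-side helper: a thread holding the MMU lock only in
 * read mode must take tdp_mmu_pages_lock before changing the roots list,
 * since concurrent readers may be walking it under mmu_lock (read) + RCU.
 */
static void tdp_mmu_link_root_sketch(struct kvm *kvm, struct kvm_mmu_page *root)
{
	lockdep_assert_held_read(&kvm->mmu_lock);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/*
 * Under the MMU lock in write mode no other writer can exist, so taking
 * tdp_mmu_pages_lock here is strictly redundant; per the new comment it
 * is still often taken so that the same helpers work in both contexts.
 */
static void tdp_mmu_unlink_root_sketch(struct kvm *kvm, struct kvm_mmu_page *root)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

The second helper mirrors what the new comment calls "redundant": a caller that already holds the MMU lock in write mode could skip the spinlock entirely, but taking it anyway lets one helper serve both the read-mode and write-mode paths.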