author    Marc Zyngier <maz@kernel.org>    2022-12-05 17:22:55 +0300
committer Marc Zyngier <maz@kernel.org>    2022-12-05 17:22:55 +0300
commit    fe8e3f44c58760f0779813938ed465b3297db1ac (patch)
tree      f0d529b2e7bf99b662134ac7b1dc180a21d0a3b8
parent    a937f37d85f3347a1b6de69990fcd214ce882886 (diff)
parent    5e806c5812e8012a83496cf96bdba266b3aec428 (diff)
Merge branch kvm-arm64/parallel-faults into kvmarm-master/next
* kvm-arm64/parallel-faults:
  : .
  : Parallel stage-2 fault handling, courtesy of Oliver Upton.
  : From the cover letter:
  :
  : "Presently KVM only takes a read lock for stage 2 faults if it believes
  : the fault can be fixed by relaxing permissions on a PTE (write unprotect
  : for dirty logging). Otherwise, stage 2 faults grab the write lock, which
  : predictably can pile up all the vCPUs in a sufficiently large VM.
  :
  : Like the TDP MMU for x86, this series loosens the locking around
  : manipulations of the stage 2 page tables to allow parallel faults. RCU
  : and atomics are exploited to safely build/destroy the stage 2 page
  : tables in light of multiple software observers."
  : .
  KVM: arm64: Reject shared table walks in the hyp code
  KVM: arm64: Don't acquire RCU read lock for exclusive table walks
  KVM: arm64: Take a pointer to walker data in kvm_dereference_pteref()
  KVM: arm64: Handle stage-2 faults in parallel
  KVM: arm64: Make table->block changes parallel-aware
  KVM: arm64: Make leaf->leaf PTE changes parallel-aware
  KVM: arm64: Make block->table PTE changes parallel-aware
  KVM: arm64: Split init and set for table PTE
  KVM: arm64: Atomically update stage 2 leaf attributes in parallel walks
  KVM: arm64: Protect stage-2 traversal with RCU
  KVM: arm64: Tear down unlinked stage-2 subtree after break-before-make
  KVM: arm64: Use an opaque type for pteps
  KVM: arm64: Add a helper to tear down unlinked stage-2 subtrees
  KVM: arm64: Don't pass kvm_pgtable through kvm_pgtable_walk_data
  KVM: arm64: Pass mm_ops through the visitor context
  KVM: arm64: Stash observed pte value in visitor context
  KVM: arm64: Combine visitor arguments into a context structure

Signed-off-by: Marc Zyngier <maz@kernel.org>
-rw-r--r--  arch/arm64/include/asm/kvm_pgtable.h   | 153
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/mem_protect.c  |  21
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/setup.c        |  22
-rw-r--r--  arch/arm64/kvm/hyp/pgtable.c           | 631
-rw-r--r--  arch/arm64/kvm/mmu.c                   |  53

5 files changed, 508 insertions, 372 deletions
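
Note: the cover letter above leans on two ideas: shared (parallel) walkers run
under the RCU read lock, and PTE updates are made with atomics so concurrent
walkers cannot clobber each other's changes. The sketch below is a minimal,
self-contained userspace C11 illustration of the second idea, mirroring the
shape of stage2_try_set_pte() in the diff that follows; the struct and helper
names here are invented for illustration and are not the kernel's.

/* Illustrative only: the exclusive-vs-shared PTE update pattern. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t kvm_pte_t;

struct visit_ctx {
	_Atomic kvm_pte_t *ptep;   /* entry being visited                   */
	kvm_pte_t old;             /* value observed when the walk got here */
	bool shared;               /* may this walk race with other walkers? */
};

static bool try_set_pte(struct visit_ctx *ctx, kvm_pte_t new)
{
	if (!ctx->shared) {
		/* Exclusive walker: no other software observer can race. */
		atomic_store_explicit(ctx->ptep, new, memory_order_release);
		return true;
	}

	/*
	 * Shared walker: install the new entry only if the PTE still holds
	 * the value we observed; otherwise the caller retries the fault.
	 */
	return atomic_compare_exchange_strong(ctx->ptep, &ctx->old, new);
}
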
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 3252eb50ecfe..d5cb01f8dc06 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -85,6 +85,8 @@ static inline bool kvm_level_supports_block_mapping(u32 level)
* allocation is physically contiguous.
* @free_pages_exact: Free an exact number of memory pages previously
* allocated by zalloc_pages_exact.
+ * @free_removed_table: Free a removed paging structure by unlinking and
+ * dropping references.
* @get_page: Increment the refcount on a page.
* @put_page: Decrement the refcount on a page. When the
* refcount reaches 0 the page is automatically
@@ -103,6 +105,7 @@ struct kvm_pgtable_mm_ops {
void* (*zalloc_page)(void *arg);
void* (*zalloc_pages_exact)(size_t size);
void (*free_pages_exact)(void *addr, size_t size);
+ void (*free_removed_table)(void *addr, u32 level);
void (*get_page)(void *addr);
void (*put_page)(void *addr);
int (*page_count)(void *addr);
@@ -162,29 +165,6 @@ typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
enum kvm_pgtable_prot prot);
/**
- * struct kvm_pgtable - KVM page-table.
- * @ia_bits: Maximum input address size, in bits.
- * @start_level: Level at which the page-table walk starts.
- * @pgd: Pointer to the first top-level entry of the page-table.
- * @mm_ops: Memory management callbacks.
- * @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
- * @flags: Stage-2 page-table flags.
- * @force_pte_cb: Function that returns true if page level mappings must
- * be used instead of block mappings.
- */
-struct kvm_pgtable {
- u32 ia_bits;
- u32 start_level;
- kvm_pte_t *pgd;
- struct kvm_pgtable_mm_ops *mm_ops;
-
- /* Stage-2 only */
- struct kvm_s2_mmu *mmu;
- enum kvm_pgtable_stage2_flags flags;
- kvm_pgtable_force_pte_cb_t force_pte_cb;
-};
-
-/**
* enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
* @KVM_PGTABLE_WALK_LEAF: Visit leaf entries, including invalid
* entries.
@@ -192,17 +172,34 @@ struct kvm_pgtable {
* children.
* @KVM_PGTABLE_WALK_TABLE_POST: Visit table entries after their
* children.
+ * @KVM_PGTABLE_WALK_SHARED: Indicates the page-tables may be shared
+ * with other software walkers.
*/
enum kvm_pgtable_walk_flags {
KVM_PGTABLE_WALK_LEAF = BIT(0),
KVM_PGTABLE_WALK_TABLE_PRE = BIT(1),
KVM_PGTABLE_WALK_TABLE_POST = BIT(2),
+ KVM_PGTABLE_WALK_SHARED = BIT(3),
};
-typedef int (*kvm_pgtable_visitor_fn_t)(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg);
+struct kvm_pgtable_visit_ctx {
+ kvm_pte_t *ptep;
+ kvm_pte_t old;
+ void *arg;
+ struct kvm_pgtable_mm_ops *mm_ops;
+ u64 addr;
+ u64 end;
+ u32 level;
+ enum kvm_pgtable_walk_flags flags;
+};
+
+typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit);
+
+static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
+{
+ return ctx->flags & KVM_PGTABLE_WALK_SHARED;
+}
/**
* struct kvm_pgtable_walker - Hook into a page-table walk.
@@ -217,6 +214,94 @@ struct kvm_pgtable_walker {
const enum kvm_pgtable_walk_flags flags;
};
+/*
+ * RCU cannot be used in a non-kernel context such as the hyp. As such, page
+ * table walkers used in hyp do not call into RCU and instead use other
+ * synchronization mechanisms (such as a spinlock).
+ */
+#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
+
+typedef kvm_pte_t *kvm_pteref_t;
+
+static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
+ kvm_pteref_t pteref)
+{
+ return pteref;
+}
+
+static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
+{
+ /*
+ * Due to the lack of RCU (or a similar protection scheme), only
+ * non-shared table walkers are allowed in the hypervisor.
+ */
+ if (walker->flags & KVM_PGTABLE_WALK_SHARED)
+ return -EPERM;
+
+ return 0;
+}
+
+static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}
+
+static inline bool kvm_pgtable_walk_lock_held(void)
+{
+ return true;
+}
+
+#else
+
+typedef kvm_pte_t __rcu *kvm_pteref_t;
+
+static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
+ kvm_pteref_t pteref)
+{
+ return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
+}
+
+static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
+{
+ if (walker->flags & KVM_PGTABLE_WALK_SHARED)
+ rcu_read_lock();
+
+ return 0;
+}
+
+static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
+{
+ if (walker->flags & KVM_PGTABLE_WALK_SHARED)
+ rcu_read_unlock();
+}
+
+static inline bool kvm_pgtable_walk_lock_held(void)
+{
+ return rcu_read_lock_held();
+}
+
+#endif
+
+/**
+ * struct kvm_pgtable - KVM page-table.
+ * @ia_bits: Maximum input address size, in bits.
+ * @start_level: Level at which the page-table walk starts.
+ * @pgd: Pointer to the first top-level entry of the page-table.
+ * @mm_ops: Memory management callbacks.
+ * @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
+ * @flags: Stage-2 page-table flags.
+ * @force_pte_cb: Function that returns true if page level mappings must
+ * be used instead of block mappings.
+ */
+struct kvm_pgtable {
+ u32 ia_bits;
+ u32 start_level;
+ kvm_pteref_t pgd;
+ struct kvm_pgtable_mm_ops *mm_ops;
+
+ /* Stage-2 only */
+ struct kvm_s2_mmu *mmu;
+ enum kvm_pgtable_stage2_flags flags;
+ kvm_pgtable_force_pte_cb_t force_pte_cb;
+};
+
/**
* kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
* @pgt: Uninitialised page-table structure to initialise.
@@ -325,6 +410,17 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
/**
+ * kvm_pgtable_stage2_free_removed() - Free a removed stage-2 paging structure.
+ * @mm_ops: Memory management callbacks.
+ * @pgtable: Unlinked stage-2 paging structure to be freed.
+ * @level: Level of the stage-2 paging structure to be freed.
+ *
+ * The page-table is assumed to be unreachable by any hardware walkers prior to
+ * freeing and therefore no TLB invalidation is performed.
+ */
+void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);
+
+/**
* kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address at which to place the mapping.
@@ -333,6 +429,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
* @prot: Permissions and attributes for the mapping.
* @mc: Cache of pre-allocated and zeroed memory from which to allocate
* page-table pages.
+ * @flags: Flags to control the page-table walk (ex. a shared walk)
*
* The offset of @addr within a page is ignored, @size is rounded-up to
* the next page boundary and @phys is rounded-down to the previous page
@@ -354,7 +451,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
*/
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
u64 phys, enum kvm_pgtable_prot prot,
- void *mc);
+ void *mc, enum kvm_pgtable_walk_flags flags);
/**
* kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 07f9dc9848ef..55326b1bd247 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -79,6 +79,11 @@ static void host_s2_put_page(void *addr)
hyp_put_page(&host_s2_pool, addr);
}
+static void host_s2_free_removed_table(void *addr, u32 level)
+{
+ kvm_pgtable_stage2_free_removed(&host_kvm.mm_ops, addr, level);
+}
+
static int prepare_s2_pool(void *pgt_pool_base)
{
unsigned long nr_pages, pfn;
@@ -93,6 +98,7 @@ static int prepare_s2_pool(void *pgt_pool_base)
host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
.zalloc_pages_exact = host_s2_zalloc_pages_exact,
.zalloc_page = host_s2_zalloc_page,
+ .free_removed_table = host_s2_free_removed_table,
.phys_to_virt = hyp_phys_to_virt,
.virt_to_phys = hyp_virt_to_phys,
.page_count = hyp_page_count,
@@ -251,7 +257,7 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
enum kvm_pgtable_prot prot)
{
return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
- prot, &host_s2_pool);
+ prot, &host_s2_pool, 0);
}
/*
@@ -417,18 +423,15 @@ struct check_walk_data {
enum pkvm_page_state (*get_page_state)(kvm_pte_t pte);
};
-static int __check_page_state_visitor(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg)
+static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct check_walk_data *d = arg;
- kvm_pte_t pte = *ptep;
+ struct check_walk_data *d = ctx->arg;
- if (kvm_pte_valid(pte) && !addr_is_memory(kvm_pte_to_phys(pte)))
+ if (kvm_pte_valid(ctx->old) && !addr_is_memory(kvm_pte_to_phys(ctx->old)))
return -EINVAL;
- return d->get_page_state(pte) == d->desired ? 0 : -EPERM;
+ return d->get_page_state(ctx->old) == d->desired ? 0 : -EPERM;
}
static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index e8d4ea2fcfa0..1068338d77f3 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -186,18 +186,15 @@ static void hpool_put_page(void *addr)
hyp_put_page(&hpool, addr);
}
-static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg)
+static int finalize_host_mappings_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct kvm_pgtable_mm_ops *mm_ops = arg;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
enum kvm_pgtable_prot prot;
enum pkvm_page_state state;
- kvm_pte_t pte = *ptep;
phys_addr_t phys;
- if (!kvm_pte_valid(pte))
+ if (!kvm_pte_valid(ctx->old))
return 0;
/*
@@ -205,14 +202,14 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
* was unable to access the hyp_vmemmap and so the buddy allocator has
* initialised the refcount to '1'.
*/
- mm_ops->get_page(ptep);
- if (flag != KVM_PGTABLE_WALK_LEAF)
+ mm_ops->get_page(ctx->ptep);
+ if (visit != KVM_PGTABLE_WALK_LEAF)
return 0;
- if (level != (KVM_PGTABLE_MAX_LEVELS - 1))
+ if (ctx->level != (KVM_PGTABLE_MAX_LEVELS - 1))
return -EINVAL;
- phys = kvm_pte_to_phys(pte);
+ phys = kvm_pte_to_phys(ctx->old);
if (!addr_is_memory(phys))
return -EINVAL;
@@ -220,7 +217,7 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
* Adjust the host stage-2 mappings to match the ownership attributes
* configured in the hypervisor stage-1.
*/
- state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
+ state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
switch (state) {
case PKVM_PAGE_OWNED:
return host_stage2_set_owner_locked(phys, PAGE_SIZE, pkvm_hyp_id);
@@ -242,7 +239,6 @@ static int finalize_host_mappings(void)
struct kvm_pgtable_walker walker = {
.cb = finalize_host_mappings_walker,
.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
- .arg = pkvm_pgtable.mm_ops,
};
int i, ret;
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index cdf8e76b0be1..58dbe0ab567f 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -49,8 +49,13 @@
#define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2)
#define KVM_MAX_OWNER_ID 1
+/*
+ * Used to indicate a pte for which a 'break-before-make' sequence is in
+ * progress.
+ */
+#define KVM_INVALID_PTE_LOCKED BIT(10)
+
struct kvm_pgtable_walk_data {
- struct kvm_pgtable *pgt;
struct kvm_pgtable_walker *walker;
u64 addr;
@@ -64,20 +69,20 @@ static bool kvm_phys_is_valid(u64 phys)
return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
}
-static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
+static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
{
- u64 granule = kvm_granule_size(level);
+ u64 granule = kvm_granule_size(ctx->level);
- if (!kvm_level_supports_block_mapping(level))
+ if (!kvm_level_supports_block_mapping(ctx->level))
return false;
- if (granule > (end - addr))
+ if (granule > (ctx->end - ctx->addr))
return false;
if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
return false;
- return IS_ALIGNED(addr, granule);
+ return IS_ALIGNED(ctx->addr, granule);
}
static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
@@ -88,7 +93,7 @@ static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
return (data->addr >> shift) & mask;
}
-static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
+static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
{
u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
u64 mask = BIT(pgt->ia_bits) - 1;
@@ -96,11 +101,6 @@ static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
return (addr & mask) >> shift;
}
-static u32 kvm_pgd_page_idx(struct kvm_pgtable_walk_data *data)
-{
- return __kvm_pgd_page_idx(data->pgt, data->addr);
-}
-
static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
{
struct kvm_pgtable pgt = {
@@ -108,7 +108,7 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
.start_level = start_level,
};
- return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
+ return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}
static bool kvm_pte_table(kvm_pte_t pte, u32 level)
@@ -142,16 +142,13 @@ static void kvm_clear_pte(kvm_pte_t *ptep)
WRITE_ONCE(*ptep, 0);
}
-static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
- struct kvm_pgtable_mm_ops *mm_ops)
+static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops)
{
- kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
+ kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
pte |= KVM_PTE_VALID;
-
- WARN_ON(kvm_pte_valid(old));
- smp_store_release(ptep, pte);
+ return pte;
}
static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
@@ -172,36 +169,47 @@ static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
}
-static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr,
- u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag)
+static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data,
+ const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
struct kvm_pgtable_walker *walker = data->walker;
- return walker->cb(addr, data->end, level, ptep, flag, walker->arg);
+
+ /* Ensure the appropriate lock is held (e.g. RCU lock for stage-2 MMU) */
+ WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx) && !kvm_pgtable_walk_lock_held());
+ return walker->cb(ctx, visit);
}
static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
- kvm_pte_t *pgtable, u32 level);
+ struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level);
static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
- kvm_pte_t *ptep, u32 level)
+ struct kvm_pgtable_mm_ops *mm_ops,
+ kvm_pteref_t pteref, u32 level)
{
- int ret = 0;
- u64 addr = data->addr;
- kvm_pte_t *childp, pte = *ptep;
- bool table = kvm_pte_table(pte, level);
enum kvm_pgtable_walk_flags flags = data->walker->flags;
+ kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
+ struct kvm_pgtable_visit_ctx ctx = {
+ .ptep = ptep,
+ .old = READ_ONCE(*ptep),
+ .arg = data->walker->arg,
+ .mm_ops = mm_ops,
+ .addr = data->addr,
+ .end = data->end,
+ .level = level,
+ .flags = flags,
+ };
+ int ret = 0;
+ kvm_pteref_t childp;
+ bool table = kvm_pte_table(ctx.old, level);
- if (table && (flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
- ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
- KVM_PGTABLE_WALK_TABLE_PRE);
- }
+ if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE))
+ ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
- if (!table && (flags & KVM_PGTABLE_WALK_LEAF)) {
- ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
- KVM_PGTABLE_WALK_LEAF);
- pte = *ptep;
- table = kvm_pte_table(pte, level);
+ if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
+ ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
+ ctx.old = READ_ONCE(*ptep);
+ table = kvm_pte_table(ctx.old, level);
}
if (ret)
@@ -213,22 +221,20 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
goto out;
}
- childp = kvm_pte_follow(pte, data->pgt->mm_ops);
- ret = __kvm_pgtable_walk(data, childp, level + 1);
+ childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
+ ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1);
if (ret)
goto out;
- if (flags & KVM_PGTABLE_WALK_TABLE_POST) {
- ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
- KVM_PGTABLE_WALK_TABLE_POST);
- }
+ if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
+ ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST);
out:
return ret;
}
static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
- kvm_pte_t *pgtable, u32 level)
+ struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level)
{
u32 idx;
int ret = 0;
@@ -237,12 +243,12 @@ static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
return -EINVAL;
for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
- kvm_pte_t *ptep = &pgtable[idx];
+ kvm_pteref_t pteref = &pgtable[idx];
if (data->addr >= data->end)
break;
- ret = __kvm_pgtable_visit(data, ptep, level);
+ ret = __kvm_pgtable_visit(data, mm_ops, pteref, level);
if (ret)
break;
}
@@ -250,11 +256,10 @@ static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
return ret;
}
-static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
+static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
{
u32 idx;
int ret = 0;
- struct kvm_pgtable *pgt = data->pgt;
u64 limit = BIT(pgt->ia_bits);
if (data->addr > limit || data->end > limit)
@@ -263,10 +268,10 @@ static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
if (!pgt->pgd)
return -EINVAL;
- for (idx = kvm_pgd_page_idx(data); data->addr < data->end; ++idx) {
- kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE];
+ for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
+ kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];
- ret = __kvm_pgtable_walk(data, ptep, pgt->start_level);
+ ret = __kvm_pgtable_walk(data, pgt->mm_ops, pteref, pgt->start_level);
if (ret)
break;
}
@@ -278,13 +283,20 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
struct kvm_pgtable_walker *walker)
{
struct kvm_pgtable_walk_data walk_data = {
- .pgt = pgt,
.addr = ALIGN_DOWN(addr, PAGE_SIZE),
.end = PAGE_ALIGN(walk_data.addr + size),
.walker = walker,
};
+ int r;
+
+ r = kvm_pgtable_walk_begin(walker);
+ if (r)
+ return r;
- return _kvm_pgtable_walk(&walk_data);
+ r = _kvm_pgtable_walk(pgt, &walk_data);
+ kvm_pgtable_walk_end(walker);
+
+ return r;
}
struct leaf_walk_data {
@@ -292,13 +304,13 @@ struct leaf_walk_data {
u32 level;
};
-static int leaf_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag, void * const arg)
+static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct leaf_walk_data *data = arg;
+ struct leaf_walk_data *data = ctx->arg;
- data->pte = *ptep;
- data->level = level;
+ data->pte = ctx->old;
+ data->level = ctx->level;
return 0;
}
@@ -329,7 +341,6 @@ int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
struct hyp_map_data {
u64 phys;
kvm_pte_t attr;
- struct kvm_pgtable_mm_ops *mm_ops;
};
static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
@@ -383,47 +394,49 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
return prot;
}
-static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep, struct hyp_map_data *data)
+static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
+ struct hyp_map_data *data)
{
- kvm_pte_t new, old = *ptep;
- u64 granule = kvm_granule_size(level), phys = data->phys;
+ kvm_pte_t new;
+ u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
- if (!kvm_block_mapping_supported(addr, end, phys, level))
+ if (!kvm_block_mapping_supported(ctx, phys))
return false;
data->phys += granule;
- new = kvm_init_valid_leaf_pte(phys, data->attr, level);
- if (old == new)
+ new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
+ if (ctx->old == new)
return true;
- if (!kvm_pte_valid(old))
- data->mm_ops->get_page(ptep);
- else if (WARN_ON((old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
+ if (!kvm_pte_valid(ctx->old))
+ ctx->mm_ops->get_page(ctx->ptep);
+ else if (WARN_ON((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
return false;
- smp_store_release(ptep, new);
+ smp_store_release(ctx->ptep, new);
return true;
}
-static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag, void * const arg)
+static int hyp_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- kvm_pte_t *childp;
- struct hyp_map_data *data = arg;
- struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+ kvm_pte_t *childp, new;
+ struct hyp_map_data *data = ctx->arg;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
- if (hyp_map_walker_try_leaf(addr, end, level, ptep, arg))
+ if (hyp_map_walker_try_leaf(ctx, data))
return 0;
- if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
+ if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1))
return -EINVAL;
childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
if (!childp)
return -ENOMEM;
- kvm_set_table_pte(ptep, childp, mm_ops);
- mm_ops->get_page(ptep);
+ new = kvm_init_table_pte(childp, mm_ops);
+ mm_ops->get_page(ctx->ptep);
+ smp_store_release(ctx->ptep, new);
+
return 0;
}
@@ -433,7 +446,6 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
int ret;
struct hyp_map_data map_data = {
.phys = ALIGN_DOWN(phys, PAGE_SIZE),
- .mm_ops = pgt->mm_ops,
};
struct kvm_pgtable_walker walker = {
.cb = hyp_map_walker,
@@ -451,44 +463,39 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
return ret;
}
-struct hyp_unmap_data {
- u64 unmapped;
- struct kvm_pgtable_mm_ops *mm_ops;
-};
-
-static int hyp_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag, void * const arg)
+static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- kvm_pte_t pte = *ptep, *childp = NULL;
- u64 granule = kvm_granule_size(level);
- struct hyp_unmap_data *data = arg;
- struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+ kvm_pte_t *childp = NULL;
+ u64 granule = kvm_granule_size(ctx->level);
+ u64 *unmapped = ctx->arg;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
- if (!kvm_pte_valid(pte))
+ if (!kvm_pte_valid(ctx->old))
return -EINVAL;
- if (kvm_pte_table(pte, level)) {
- childp = kvm_pte_follow(pte, mm_ops);
+ if (kvm_pte_table(ctx->old, ctx->level)) {
+ childp = kvm_pte_follow(ctx->old, mm_ops);
if (mm_ops->page_count(childp) != 1)
return 0;
- kvm_clear_pte(ptep);
+ kvm_clear_pte(ctx->ptep);
dsb(ishst);
- __tlbi_level(vae2is, __TLBI_VADDR(addr, 0), level);
+ __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
} else {
- if (end - addr < granule)
+ if (ctx->end - ctx->addr < granule)
return -EINVAL;
- kvm_clear_pte(ptep);
+ kvm_clear_pte(ctx->ptep);
dsb(ishst);
- __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
- data->unmapped += granule;
+ __tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
+ *unmapped += granule;
}
dsb(ish);
isb();
- mm_ops->put_page(ptep);
+ mm_ops->put_page(ctx->ptep);
if (childp)
mm_ops->put_page(childp);
@@ -498,12 +505,10 @@ static int hyp_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
- struct hyp_unmap_data unmap_data = {
- .mm_ops = pgt->mm_ops,
- };
+ u64 unmapped = 0;
struct kvm_pgtable_walker walker = {
.cb = hyp_unmap_walker,
- .arg = &unmap_data,
+ .arg = &unmapped,
.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
};
@@ -511,7 +516,7 @@ u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
return 0;
kvm_pgtable_walk(pgt, addr, size, &walker);
- return unmap_data.unmapped;
+ return unmapped;
}
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
@@ -519,7 +524,7 @@ int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
{
u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits);
- pgt->pgd = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
+ pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);
if (!pgt->pgd)
return -ENOMEM;
@@ -532,19 +537,18 @@ int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
return 0;
}
-static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag, void * const arg)
+static int hyp_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct kvm_pgtable_mm_ops *mm_ops = arg;
- kvm_pte_t pte = *ptep;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
- if (!kvm_pte_valid(pte))
+ if (!kvm_pte_valid(ctx->old))
return 0;
- mm_ops->put_page(ptep);
+ mm_ops->put_page(ctx->ptep);
- if (kvm_pte_table(pte, level))
- mm_ops->put_page(kvm_pte_follow(pte, mm_ops));
+ if (kvm_pte_table(ctx->old, ctx->level))
+ mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
return 0;
}
@@ -554,11 +558,10 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
struct kvm_pgtable_walker walker = {
.cb = hyp_free_walker,
.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
- .arg = pgt->mm_ops,
};
WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
- pgt->mm_ops->put_page(pgt->pgd);
+ pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
pgt->pgd = NULL;
}
@@ -573,8 +576,6 @@ struct stage2_map_data {
struct kvm_s2_mmu *mmu;
void *memcache;
- struct kvm_pgtable_mm_ops *mm_ops;
-
/* Force mappings to page granularity */
bool force_pte;
};
@@ -682,19 +683,92 @@ static bool stage2_pte_is_counted(kvm_pte_t pte)
return !!pte;
}
-static void stage2_put_pte(kvm_pte_t *ptep, struct kvm_s2_mmu *mmu, u64 addr,
- u32 level, struct kvm_pgtable_mm_ops *mm_ops)
+static bool stage2_pte_is_locked(kvm_pte_t pte)
+{
+ return !kvm_pte_valid(pte) && (pte & KVM_INVALID_PTE_LOCKED);
+}
+
+static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
+{
+ if (!kvm_pgtable_walk_shared(ctx)) {
+ WRITE_ONCE(*ctx->ptep, new);
+ return true;
+ }
+
+ return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
+}
+
+/**
+ * stage2_try_break_pte() - Invalidates a pte according to the
+ * 'break-before-make' requirements of the
+ * architecture.
+ *
+ * @ctx: context of the visited pte.
+ * @mmu: stage-2 mmu
+ *
+ * Returns: true if the pte was successfully broken.
+ *
+ * If the removed pte was valid, performs the necessary serialization and TLB
+ * invalidation for the old value. For counted ptes, drops the reference count
+ * on the containing table page.
+ */
+static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
+ struct kvm_s2_mmu *mmu)
+{
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
+
+ if (stage2_pte_is_locked(ctx->old)) {
+ /*
+ * Should never occur if this walker has exclusive access to the
+ * page tables.
+ */
+ WARN_ON(!kvm_pgtable_walk_shared(ctx));
+ return false;
+ }
+
+ if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
+ return false;
+
+ /*
+ * Perform the appropriate TLB invalidation based on the evicted pte
+ * value (if any).
+ */
+ if (kvm_pte_table(ctx->old, ctx->level))
+ kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+ else if (kvm_pte_valid(ctx->old))
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
+
+ if (stage2_pte_is_counted(ctx->old))
+ mm_ops->put_page(ctx->ptep);
+
+ return true;
+}
+
+static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
+{
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
+
+ WARN_ON(!stage2_pte_is_locked(*ctx->ptep));
+
+ if (stage2_pte_is_counted(new))
+ mm_ops->get_page(ctx->ptep);
+
+ smp_store_release(ctx->ptep, new);
+}
+
+static void stage2_put_pte(const struct kvm_pgtable_visit_ctx *ctx, struct kvm_s2_mmu *mmu,
+ struct kvm_pgtable_mm_ops *mm_ops)
{
/*
* Clear the existing PTE, and perform break-before-make with
* TLB maintenance if it was valid.
*/
- if (kvm_pte_valid(*ptep)) {
- kvm_clear_pte(ptep);
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
+ if (kvm_pte_valid(ctx->old)) {
+ kvm_clear_pte(ctx->ptep);
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
}
- mm_ops->put_page(ptep);
+ mm_ops->put_page(ctx->ptep);
}
static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
@@ -708,44 +782,42 @@ static bool stage2_pte_executable(kvm_pte_t pte)
return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
}
-static bool stage2_leaf_mapping_allowed(u64 addr, u64 end, u32 level,
+static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
struct stage2_map_data *data)
{
- if (data->force_pte && (level < (KVM_PGTABLE_MAX_LEVELS - 1)))
+ if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
return false;
- return kvm_block_mapping_supported(addr, end, data->phys, level);
+ return kvm_block_mapping_supported(ctx, data->phys);
}
-static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
+static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
struct stage2_map_data *data)
{
- kvm_pte_t new, old = *ptep;
- u64 granule = kvm_granule_size(level), phys = data->phys;
+ kvm_pte_t new;
+ u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
struct kvm_pgtable *pgt = data->mmu->pgt;
- struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
- if (!stage2_leaf_mapping_allowed(addr, end, level, data))
+ if (!stage2_leaf_mapping_allowed(ctx, data))
return -E2BIG;
if (kvm_phys_is_valid(phys))
- new = kvm_init_valid_leaf_pte(phys, data->attr, level);
+ new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
else
new = kvm_init_invalid_leaf_owner(data->owner_id);
- if (stage2_pte_is_counted(old)) {
- /*
- * Skip updating the PTE if we are trying to recreate the exact
- * same mapping or only change the access permissions. Instead,
- * the vCPU will exit one more time from guest if still needed
- * and then go through the path of relaxing permissions.
- */
- if (!stage2_pte_needs_update(old, new))
- return -EAGAIN;
+ /*
+ * Skip updating the PTE if we are trying to recreate the exact
+ * same mapping or only change the access permissions. Instead,
+ * the vCPU will exit one more time from guest if still needed
+ * and then go through the path of relaxing permissions.
+ */
+ if (!stage2_pte_needs_update(ctx->old, new))
+ return -EAGAIN;
- stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);
- }
+ if (!stage2_try_break_pte(ctx, data->mmu))
+ return -EAGAIN;
/* Perform CMOs before installation of the guest stage-2 PTE */
if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
@@ -755,56 +827,43 @@ static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);
- smp_store_release(ptep, new);
- if (stage2_pte_is_counted(new))
- mm_ops->get_page(ptep);
+ stage2_make_pte(ctx, new);
+
if (kvm_phys_is_valid(phys))
data->phys += granule;
return 0;
}
-static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
+static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
struct stage2_map_data *data)
{
- if (data->anchor)
- return 0;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
+ kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);
+ int ret;
- if (!stage2_leaf_mapping_allowed(addr, end, level, data))
+ if (!stage2_leaf_mapping_allowed(ctx, data))
return 0;
- data->childp = kvm_pte_follow(*ptep, data->mm_ops);
- kvm_clear_pte(ptep);
+ ret = stage2_map_walker_try_leaf(ctx, data);
+ if (ret)
+ return ret;
- /*
- * Invalidate the whole stage-2, as we may have numerous leaf
- * entries below us which would otherwise need invalidating
- * individually.
- */
- kvm_call_hyp(__kvm_tlb_flush_vmid, data->mmu);
- data->anchor = ptep;
+ mm_ops->free_removed_table(childp, ctx->level);
return 0;
}
-static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
+static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
struct stage2_map_data *data)
{
- struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
- kvm_pte_t *childp, pte = *ptep;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
+ kvm_pte_t *childp, new;
int ret;
- if (data->anchor) {
- if (stage2_pte_is_counted(pte))
- mm_ops->put_page(ptep);
-
- return 0;
- }
-
- ret = stage2_map_walker_try_leaf(addr, end, level, ptep, data);
+ ret = stage2_map_walker_try_leaf(ctx, data);
if (ret != -E2BIG)
return ret;
- if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
+ if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1))
return -EINVAL;
if (!data->memcache)
@@ -814,99 +873,62 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
if (!childp)
return -ENOMEM;
+ if (!stage2_try_break_pte(ctx, data->mmu)) {
+ mm_ops->put_page(childp);
+ return -EAGAIN;
+ }
+
/*
* If we've run into an existing block mapping then replace it with
* a table. Accesses beyond 'end' that fall within the new table
* will be mapped lazily.
*/
- if (stage2_pte_is_counted(pte))
- stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);
-
- kvm_set_table_pte(ptep, childp, mm_ops);
- mm_ops->get_page(ptep);
+ new = kvm_init_table_pte(childp, mm_ops);
+ stage2_make_pte(ctx, new);
return 0;
}
-static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
- struct stage2_map_data *data)
-{
- struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
- kvm_pte_t *childp;
- int ret = 0;
-
- if (!data->anchor)
- return 0;
-
- if (data->anchor == ptep) {
- childp = data->childp;
- data->anchor = NULL;
- data->childp = NULL;
- ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
- } else {
- childp = kvm_pte_follow(*ptep, mm_ops);
- }
-
- mm_ops->put_page(childp);
- mm_ops->put_page(ptep);
-
- return ret;
-}
-
/*
- * This is a little fiddly, as we use all three of the walk flags. The idea
- * is that the TABLE_PRE callback runs for table entries on the way down,
- * looking for table entries which we could conceivably replace with a
- * block entry for this mapping. If it finds one, then it sets the 'anchor'
- * field in 'struct stage2_map_data' to point at the table entry, before
- * clearing the entry to zero and descending into the now detached table.
+ * The TABLE_PRE callback runs for table entries on the way down, looking
+ * for table entries which we could conceivably replace with a block entry
+ * for this mapping. If it finds one it replaces the entry and calls
+ * kvm_pgtable_mm_ops::free_removed_table() to tear down the detached table.
*
- * The behaviour of the LEAF callback then depends on whether or not the
- * anchor has been set. If not, then we're not using a block mapping higher
- * up the table and we perform the mapping at the existing leaves instead.
- * If, on the other hand, the anchor _is_ set, then we drop references to
- * all valid leaves so that the pages beneath the anchor can be freed.
- *
- * Finally, the TABLE_POST callback does nothing if the anchor has not
- * been set, but otherwise frees the page-table pages while walking back up
- * the page-table, installing the block entry when it revisits the anchor
- * pointer and clearing the anchor to NULL.
+ * Otherwise, the LEAF callback performs the mapping at the existing leaves
+ * instead.
*/
-static int stage2_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag, void * const arg)
+static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct stage2_map_data *data = arg;
+ struct stage2_map_data *data = ctx->arg;
- switch (flag) {
+ switch (visit) {
case KVM_PGTABLE_WALK_TABLE_PRE:
- return stage2_map_walk_table_pre(addr, end, level, ptep, data);
+ return stage2_map_walk_table_pre(ctx, data);
case KVM_PGTABLE_WALK_LEAF:
- return stage2_map_walk_leaf(addr, end, level, ptep, data);
- case KVM_PGTABLE_WALK_TABLE_POST:
- return stage2_map_walk_table_post(addr, end, level, ptep, data);
+ return stage2_map_walk_leaf(ctx, data);
+ default:
+ return -EINVAL;
}
-
- return -EINVAL;
}
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
u64 phys, enum kvm_pgtable_prot prot,
- void *mc)
+ void *mc, enum kvm_pgtable_walk_flags flags)
{
int ret;
struct stage2_map_data map_data = {
.phys = ALIGN_DOWN(phys, PAGE_SIZE),
.mmu = pgt->mmu,
.memcache = mc,
- .mm_ops = pgt->mm_ops,
.force_pte = pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),
};
struct kvm_pgtable_walker walker = {
.cb = stage2_map_walker,
- .flags = KVM_PGTABLE_WALK_TABLE_PRE |
- KVM_PGTABLE_WALK_LEAF |
- KVM_PGTABLE_WALK_TABLE_POST,
+ .flags = flags |
+ KVM_PGTABLE_WALK_TABLE_PRE |
+ KVM_PGTABLE_WALK_LEAF,
.arg = &map_data,
};
@@ -930,15 +952,13 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
.phys = KVM_PHYS_INVALID,
.mmu = pgt->mmu,
.memcache = mc,
- .mm_ops = pgt->mm_ops,
.owner_id = owner_id,
.force_pte = true,
};
struct kvm_pgtable_walker walker = {
.cb = stage2_map_walker,
.flags = KVM_PGTABLE_WALK_TABLE_PRE |
- KVM_PGTABLE_WALK_LEAF |
- KVM_PGTABLE_WALK_TABLE_POST,
+ KVM_PGTABLE_WALK_LEAF,
.arg = &map_data,
};
@@ -949,30 +969,29 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
return ret;
}
-static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg)
+static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct kvm_pgtable *pgt = arg;
+ struct kvm_pgtable *pgt = ctx->arg;
struct kvm_s2_mmu *mmu = pgt->mmu;
- struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
- kvm_pte_t pte = *ptep, *childp = NULL;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
+ kvm_pte_t *childp = NULL;
bool need_flush = false;
- if (!kvm_pte_valid(pte)) {
- if (stage2_pte_is_counted(pte)) {
- kvm_clear_pte(ptep);
- mm_ops->put_page(ptep);
+ if (!kvm_pte_valid(ctx->old)) {
+ if (stage2_pte_is_counted(ctx->old)) {
+ kvm_clear_pte(ctx->ptep);
+ mm_ops->put_page(ctx->ptep);
}
return 0;
}
- if (kvm_pte_table(pte, level)) {
- childp = kvm_pte_follow(pte, mm_ops);
+ if (kvm_pte_table(ctx->old, ctx->level)) {
+ childp = kvm_pte_follow(ctx->old, mm_ops);
if (mm_ops->page_count(childp) != 1)
return 0;
- } else if (stage2_pte_cacheable(pgt, pte)) {
+ } else if (stage2_pte_cacheable(pgt, ctx->old)) {
need_flush = !stage2_has_fwb(pgt);
}
@@ -981,11 +1000,11 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
* block entry and rely on the remaining portions being faulted
* back lazily.
*/
- stage2_put_pte(ptep, mmu, addr, level, mm_ops);
+ stage2_put_pte(ctx, mmu, mm_ops);
if (need_flush && mm_ops->dcache_clean_inval_poc)
- mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
- kvm_granule_size(level));
+ mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
+ kvm_granule_size(ctx->level));
if (childp)
mm_ops->put_page(childp);
@@ -1009,21 +1028,19 @@ struct stage2_attr_data {
kvm_pte_t attr_clr;
kvm_pte_t pte;
u32 level;
- struct kvm_pgtable_mm_ops *mm_ops;
};
-static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg)
+static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- kvm_pte_t pte = *ptep;
- struct stage2_attr_data *data = arg;
- struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+ kvm_pte_t pte = ctx->old;
+ struct stage2_attr_data *data = ctx->arg;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
- if (!kvm_pte_valid(pte))
+ if (!kvm_pte_valid(ctx->old))
return 0;
- data->level = level;
+ data->level = ctx->level;
data->pte = pte;
pte &= ~data->attr_clr;
pte |= data->attr_set;
@@ -1039,10 +1056,12 @@ static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
* stage-2 PTE if we are going to add executable permission.
*/
if (mm_ops->icache_inval_pou &&
- stage2_pte_executable(pte) && !stage2_pte_executable(*ptep))
+ stage2_pte_executable(pte) && !stage2_pte_executable(ctx->old))
mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
- kvm_granule_size(level));
- WRITE_ONCE(*ptep, pte);
+ kvm_granule_size(ctx->level));
+
+ if (!stage2_try_set_pte(ctx, pte))
+ return -EAGAIN;
}
return 0;
@@ -1051,19 +1070,18 @@ static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
u64 size, kvm_pte_t attr_set,
kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
- u32 *level)
+ u32 *level, enum kvm_pgtable_walk_flags flags)
{
int ret;
kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
struct stage2_attr_data data = {
.attr_set = attr_set & attr_mask,
.attr_clr = attr_clr & attr_mask,
- .mm_ops = pgt->mm_ops,
};
struct kvm_pgtable_walker walker = {
.cb = stage2_attr_walker,
.arg = &data,
- .flags = KVM_PGTABLE_WALK_LEAF,
+ .flags = flags | KVM_PGTABLE_WALK_LEAF,
};
ret = kvm_pgtable_walk(pgt, addr, size, &walker);
@@ -1082,14 +1100,14 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
return stage2_update_leaf_attrs(pgt, addr, size, 0,
KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
- NULL, NULL);
+ NULL, NULL, 0);
}
kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
{
kvm_pte_t pte = 0;
stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
- &pte, NULL);
+ &pte, NULL, 0);
dsb(ishst);
return pte;
}
@@ -1098,7 +1116,7 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
{
kvm_pte_t pte = 0;
stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
- &pte, NULL);
+ &pte, NULL, 0);
/*
* "But where's the TLBI?!", you scream.
* "Over in the core code", I sigh.
@@ -1111,7 +1129,7 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
{
kvm_pte_t pte = 0;
- stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL);
+ stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL, 0);
return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
}
@@ -1134,26 +1152,25 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
if (prot & KVM_PGTABLE_PROT_X)
clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
- ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level);
+ ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level,
+ KVM_PGTABLE_WALK_SHARED);
if (!ret)
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level);
return ret;
}
-static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg)
+static int stage2_flush_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct kvm_pgtable *pgt = arg;
+ struct kvm_pgtable *pgt = ctx->arg;
struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
- kvm_pte_t pte = *ptep;
- if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
+ if (!kvm_pte_valid(ctx->old) || !stage2_pte_cacheable(pgt, ctx->old))
return 0;
if (mm_ops->dcache_clean_inval_poc)
- mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
- kvm_granule_size(level));
+ mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
+ kvm_granule_size(ctx->level));
return 0;
}
@@ -1184,7 +1201,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
- pgt->pgd = mm_ops->zalloc_pages_exact(pgd_sz);
+ pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);
if (!pgt->pgd)
return -ENOMEM;
@@ -1200,20 +1217,18 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
return 0;
}
-static int stage2_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg)
+static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct kvm_pgtable_mm_ops *mm_ops = arg;
- kvm_pte_t pte = *ptep;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
- if (!stage2_pte_is_counted(pte))
+ if (!stage2_pte_is_counted(ctx->old))
return 0;
- mm_ops->put_page(ptep);
+ mm_ops->put_page(ctx->ptep);
- if (kvm_pte_table(pte, level))
- mm_ops->put_page(kvm_pte_follow(pte, mm_ops));
+ if (kvm_pte_table(ctx->old, ctx->level))
+ mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
return 0;
}
@@ -1225,11 +1240,33 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
.cb = stage2_free_walker,
.flags = KVM_PGTABLE_WALK_LEAF |
KVM_PGTABLE_WALK_TABLE_POST,
- .arg = pgt->mm_ops,
};
WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
- pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz);
+ pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
pgt->pgd = NULL;
}
+
+void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
+{
+ kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
+ struct kvm_pgtable_walker walker = {
+ .cb = stage2_free_walker,
+ .flags = KVM_PGTABLE_WALK_LEAF |
+ KVM_PGTABLE_WALK_TABLE_POST,
+ };
+ struct kvm_pgtable_walk_data data = {
+ .walker = &walker,
+
+ /*
+ * At this point the IPA really doesn't matter, as the page
+ * table being traversed has already been removed from the stage
+ * 2. Set an appropriate range to cover the entire page table.
+ */
+ .addr = 0,
+ .end = kvm_granule_size(level),
+ };
+
+ WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
+}
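
Note on the pgtable.c changes above: stage2_try_break_pte() and
stage2_make_pte() implement a software break-before-make, parking the
KVM_INVALID_PTE_LOCKED marker in the entry while the old translation is
invalidated and the replacement is built. A rough userspace C11 sketch of
that sequence, with TLB and RCU details elided and all names invented for
illustration, looks like this:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t pte_t;

#define PTE_VALID   (1ull << 0)
#define PTE_LOCKED  (1ull << 10)   /* invalid value marking "break in progress" */

static bool try_break_pte(_Atomic pte_t *ptep, pte_t old)
{
	/* Atomically park the locked marker in place of the value we saw. */
	if (!atomic_compare_exchange_strong(ptep, &old, PTE_LOCKED))
		return false;   /* lost the race; the caller retries the fault */

	/* The kernel would invalidate TLBs for 'old' here if it was valid. */
	return true;
}

static void make_pte(_Atomic pte_t *ptep, pte_t new)
{
	/* Publish the fully-formed replacement with release semantics. */
	atomic_store_explicit(ptep, new, memory_order_release);
}
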
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 1ef0704420d9..0fe5945bc4fb 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -128,6 +128,25 @@ static void kvm_s2_free_pages_exact(void *virt, size_t size)
free_pages_exact(virt, size);
}
+static struct kvm_pgtable_mm_ops kvm_s2_mm_ops;
+
+static void stage2_free_removed_table_rcu_cb(struct rcu_head *head)
+{
+ struct page *page = container_of(head, struct page, rcu_head);
+ void *pgtable = page_to_virt(page);
+ u32 level = page_private(page);
+
+ kvm_pgtable_stage2_free_removed(&kvm_s2_mm_ops, pgtable, level);
+}
+
+static void stage2_free_removed_table(void *addr, u32 level)
+{
+ struct page *page = virt_to_page(addr);
+
+ set_page_private(page, (unsigned long)level);
+ call_rcu(&page->rcu_head, stage2_free_removed_table_rcu_cb);
+}
+
static void kvm_host_get_page(void *addr)
{
get_page(virt_to_page(addr));
@@ -640,7 +659,7 @@ static struct kvm_pgtable_mm_ops kvm_user_mm_ops = {
static int get_user_mapping_size(struct kvm *kvm, u64 addr)
{
struct kvm_pgtable pgt = {
- .pgd = (kvm_pte_t *)kvm->mm->pgd,
+ .pgd = (kvm_pteref_t)kvm->mm->pgd,
.ia_bits = vabits_actual,
.start_level = (KVM_PGTABLE_MAX_LEVELS -
CONFIG_PGTABLE_LEVELS),
@@ -662,6 +681,7 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
.zalloc_page = stage2_memcache_zalloc_page,
.zalloc_pages_exact = kvm_s2_zalloc_pages_exact,
.free_pages_exact = kvm_s2_free_pages_exact,
+ .free_removed_table = stage2_free_removed_table,
.get_page = kvm_host_get_page,
.put_page = kvm_s2_put_page,
.page_count = kvm_host_page_count,
@@ -841,7 +861,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
write_lock(&kvm->mmu_lock);
ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
- &cache);
+ &cache, 0);
write_unlock(&kvm->mmu_lock);
if (ret)
break;
@@ -1136,7 +1156,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
gfn_t gfn;
kvm_pfn_t pfn;
bool logging_active = memslot_is_logging(memslot);
- bool use_read_lock = false;
unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
unsigned long vma_pagesize, fault_granule;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
@@ -1171,8 +1190,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (logging_active) {
force_pte = true;
vma_shift = PAGE_SHIFT;
- use_read_lock = (fault_status == FSC_PERM && write_fault &&
- fault_granule == PAGE_SIZE);
} else {
vma_shift = get_vma_page_shift(vma, hva);
}
@@ -1271,15 +1288,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (exec_fault && device)
return -ENOEXEC;
- /*
- * To reduce MMU contentions and enhance concurrency during dirty
- * logging dirty logging, only acquire read lock for permission
- * relaxation.
- */
- if (use_read_lock)
- read_lock(&kvm->mmu_lock);
- else
- write_lock(&kvm->mmu_lock);
+ read_lock(&kvm->mmu_lock);
pgt = vcpu->arch.hw_mmu->pgt;
if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;
@@ -1323,15 +1332,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* permissions only if vma_pagesize equals fault_granule. Otherwise,
* kvm_pgtable_stage2_map() should be called to change block size.
*/
- if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
+ if (fault_status == FSC_PERM && vma_pagesize == fault_granule)
ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
- } else {
- WARN_ONCE(use_read_lock, "Attempted stage-2 map outside of write lock\n");
-
+ else
ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
__pfn_to_phys(pfn), prot,
- memcache);
- }
+ memcache, KVM_PGTABLE_WALK_SHARED);
/* Mark the page dirty only if the fault is handled successfully */
if (writable && !ret) {
@@ -1340,10 +1346,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
out_unlock:
- if (use_read_lock)
- read_unlock(&kvm->mmu_lock);
- else
- write_unlock(&kvm->mmu_lock);
+ read_unlock(&kvm->mmu_lock);
kvm_set_pfn_accessed(pfn);
kvm_release_pfn_clean(pfn);
return ret != -EAGAIN ? ret : 0;
@@ -1549,7 +1552,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
*/
kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
PAGE_SIZE, __pfn_to_phys(pfn),
- KVM_PGTABLE_PROT_R, NULL);
+ KVM_PGTABLE_PROT_R, NULL, 0);
return false;
}
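
For completeness, the mmu.c hunk above also changes the caller side:
user_mem_abort() now takes only the read side of mmu_lock for every stage-2
fault and passes KVM_PGTABLE_WALK_SHARED to kvm_pgtable_stage2_map(), treating
-EAGAIN as "another vCPU won the race, let the guest re-fault". A hedged
userspace sketch of that locking shape, using a pthread rwlock and a stub map
function as stand-ins (none of these helpers are kernel APIs):

#include <errno.h>
#include <pthread.h>
#include <stdint.h>

#define WALK_SHARED  (1u << 3)   /* stand-in for KVM_PGTABLE_WALK_SHARED */

static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stub: a real implementation would walk the stage-2 tables. */
static int stage2_map(uint64_t ipa, uint64_t size, uint64_t pa,
		      unsigned int walk_flags)
{
	(void)ipa; (void)size; (void)pa; (void)walk_flags;
	return 0;   /* or -EAGAIN when a concurrent walker wins the race */
}

static int handle_stage2_fault(uint64_t ipa, uint64_t size, uint64_t pa)
{
	int ret;

	pthread_rwlock_rdlock(&mmu_lock);   /* read lock even for map faults */
	ret = stage2_map(ipa, size, pa, WALK_SHARED);
	pthread_rwlock_unlock(&mmu_lock);

	/* A lost race surfaces as -EAGAIN: report success and re-fault. */
	return ret == -EAGAIN ? 0 : ret;
}
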