Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu_hv.c')
 arch/powerpc/kvm/book3s_64_mmu_hv.c | 203 ++++++++++++++++++++++++++------
 1 file changed, 166 insertions(+), 37 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 534acb3c6c3d..1a4acf8bf4f4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -27,6 +27,7 @@
#include <linux/srcu.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
+#include <linux/debugfs.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
@@ -116,12 +117,12 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
long order;
mutex_lock(&kvm->lock);
- if (kvm->arch.rma_setup_done) {
- kvm->arch.rma_setup_done = 0;
- /* order rma_setup_done vs. vcpus_running */
+ if (kvm->arch.hpte_setup_done) {
+ kvm->arch.hpte_setup_done = 0;
+ /* order hpte_setup_done vs. vcpus_running */
smp_mb();
if (atomic_read(&kvm->arch.vcpus_running)) {
- kvm->arch.rma_setup_done = 1;
+ kvm->arch.hpte_setup_done = 1;
goto out;
}
}
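
The smp_mb() here pairs with a matching barrier on the vcpu-entry path: a vcpu increments vcpus_running before testing hpte_setup_done, so either this function sees the running vcpu and backs off, or the vcpu sees setup undone and redoes it. A sketch of the entry side, assuming the structure of kvmppc_vcpu_run_hv() in book3s_hv.c:

	atomic_inc(&vcpu->kvm->arch.vcpus_running);
	/* order vcpus_running vs. hpte_setup_done; pairs with the
	 * smp_mb() in kvmppc_alloc_reset_hpt() above */
	smp_mb();
	/* on the first run here, set up the HTAB and VRMA */
	if (!vcpu->kvm->arch.hpte_setup_done) {
		r = kvmppc_hv_setup_htab_rma(vcpu);
		if (r)
			goto out;
	}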
@@ -338,9 +339,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
gr = kvm->arch.revmap[index].guest_rpte;
- /* Unlock the HPTE */
- asm volatile("lwsync" : : : "memory");
- hptep[0] = cpu_to_be64(v);
+ unlock_hpte(hptep, v);
preempt_enable();
gpte->eaddr = eaddr;
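
unlock_hpte() replaces the open-coded lwsync-plus-store sequence. The helper pair lives in arch/powerpc/include/asm/kvm_book3s_64.h; a sketch of their likely shape, inferred from the sequences this diff replaces:

	/* release the HPTE lock bit; the release barrier makes prior
	 * HPTE updates visible before the lock is dropped */
	static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
	{
		hpte_v &= ~HPTE_V_HVLOCK;
		asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
		hpte[0] = cpu_to_be64(hpte_v);
	}

	/* as above but with no barrier, for callers that provide
	 * their own ordering (e.g. eieio/ptesync around the store) */
	static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
	{
		hpte_v &= ~HPTE_V_HVLOCK;
		hpte[0] = cpu_to_be64(hpte_v);
	}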
@@ -469,8 +468,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
hpte[1] = be64_to_cpu(hptep[1]);
hpte[2] = r = rev->guest_rpte;
- asm volatile("lwsync" : : : "memory");
- hptep[0] = cpu_to_be64(hpte[0]);
+ unlock_hpte(hptep, hpte[0]);
preempt_enable();
if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
@@ -537,23 +535,21 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
/* if the guest wants write access, see if that is OK */
if (!writing && hpte_is_writable(r)) {
- unsigned int hugepage_shift;
pte_t *ptep, pte;
-
+ unsigned long flags;
/*
* We need to protect against page table destruction
- * while looking up and updating the pte.
+ * as well as hugepage split and collapse.
*/
- rcu_read_lock_sched();
+ local_irq_save(flags);
ptep = find_linux_pte_or_hugepte(current->mm->pgd,
- hva, &hugepage_shift);
+ hva, NULL);
if (ptep) {
- pte = kvmppc_read_update_linux_pte(ptep, 1,
- hugepage_shift);
+ pte = kvmppc_read_update_linux_pte(ptep, 1);
if (pte_write(pte))
write_ok = 1;
}
- rcu_read_unlock_sched();
+ local_irq_restore(flags);
}
}
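
Switching from rcu_read_lock_sched() to local_irq_save() is what makes the lockless walk safe against THP collapse as well as page table teardown: the collapse path flushes the pmd and then synchronizes with all CPUs via an IPI-based sync, which cannot complete while this CPU runs with interrupts off. The calling convention also changes: passing NULL for the shift pointer says the caller does not expect a hugepage entry. A sketch of the general pattern callers now follow (a sketch, not this function verbatim):

	unsigned int shift;
	unsigned long flags;
	pte_t *ptep;

	local_irq_save(flags);	/* blocks THP collapse from completing */
	ptep = find_linux_pte_or_hugepte(mm->pgd, addr, &shift);
	if (ptep) {
		/* *ptep is stable here; nonzero shift means a hugepage entry */
	}
	local_irq_restore(flags);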
@@ -621,7 +617,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
hptep[1] = cpu_to_be64(r);
eieio();
- hptep[0] = cpu_to_be64(hpte[0]);
+ __unlock_hpte(hptep, hpte[0]);
asm volatile("ptesync" : : : "memory");
preempt_enable();
if (page && hpte_is_writable(r))
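
The barrier-free __unlock_hpte() is correct here because the surrounding code supplies stronger ordering of its own: eieio orders the store to the second doubleword before the store to the first, and the store to the first doubleword both sets HPTE_V_VALID and clears HPTE_V_HVLOCK in a single write, so the entry is published and unlocked atomically; ptesync then makes the completed HPTE visible to the hardware page-table walker. Schematically:

	hptep[1] = cpu_to_be64(r);	/* 1: RPN and permission bits */
	eieio();			/* 2: order dword1 before dword0 */
	__unlock_hpte(hptep, hpte[0]);	/* 3: set VALID, drop HVLOCK */
	asm volatile("ptesync" : : : "memory");	/* 4: visible to the MMU */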
@@ -642,7 +638,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
return ret;
out_unlock:
- hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
preempt_enable();
goto out_put;
}
@@ -771,7 +767,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
}
}
unlock_rmap(rmapp);
- hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
}
return 0;
}
@@ -857,7 +853,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
}
ret = 1;
}
- hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
} while ((i = j) != head);
unlock_rmap(rmapp);
@@ -974,8 +970,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
/* Now check and modify the HPTE */
if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
- /* unlock and continue */
- hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
continue;
}
@@ -996,9 +991,9 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
npages_dirty = n;
eieio();
}
- v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
+ v &= ~HPTE_V_ABSENT;
v |= HPTE_V_VALID;
- hptep[0] = cpu_to_be64(v);
+ __unlock_hpte(hptep, v);
} while ((i = j) != head);
unlock_rmap(rmapp);
@@ -1218,8 +1213,7 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
r &= ~HPTE_GR_MODIFIED;
revp->guest_rpte = r;
}
- asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
- hptp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ unlock_hpte(hptp, be64_to_cpu(hptp[0]));
preempt_enable();
if (!(valid == want_valid && (first_pass || dirty)))
ok = 0;
@@ -1339,20 +1333,20 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
unsigned long tmp[2];
ssize_t nb;
long int err, ret;
- int rma_setup;
+ int hpte_setup;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
/* lock out vcpus from running while we're doing this */
mutex_lock(&kvm->lock);
- rma_setup = kvm->arch.rma_setup_done;
- if (rma_setup) {
- kvm->arch.rma_setup_done = 0; /* temporarily */
- /* order rma_setup_done vs. vcpus_running */
+ hpte_setup = kvm->arch.hpte_setup_done;
+ if (hpte_setup) {
+ kvm->arch.hpte_setup_done = 0; /* temporarily */
+ /* order hpte_setup_done vs. vcpus_running */
smp_mb();
if (atomic_read(&kvm->arch.vcpus_running)) {
- kvm->arch.rma_setup_done = 1;
+ kvm->arch.hpte_setup_done = 1;
mutex_unlock(&kvm->lock);
return -EBUSY;
}
@@ -1405,7 +1399,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
"r=%lx\n", ret, i, v, r);
goto out;
}
- if (!rma_setup && is_vrma_hpte(v)) {
+ if (!hpte_setup && is_vrma_hpte(v)) {
unsigned long psize = hpte_base_page_size(v, r);
unsigned long senc = slb_pgsize_encoding(psize);
unsigned long lpcr;
@@ -1414,7 +1408,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
(VRMA_VSID << SLB_VSID_SHIFT_1T);
lpcr = senc << (LPCR_VRMASD_SH - 4);
kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
- rma_setup = 1;
+ hpte_setup = 1;
}
++i;
hptp += 2;
@@ -1430,9 +1424,9 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
}
out:
- /* Order HPTE updates vs. rma_setup_done */
+ /* Order HPTE updates vs. hpte_setup_done */
smp_wmb();
- kvm->arch.rma_setup_done = rma_setup;
+ kvm->arch.hpte_setup_done = hpte_setup;
mutex_unlock(&kvm->lock);
if (err)
@@ -1495,6 +1489,141 @@ int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
return ret;
}
+struct debugfs_htab_state {
+ struct kvm *kvm;
+ struct mutex mutex;
+ unsigned long hpt_index;
+ int chars_left;
+ int buf_index;
+ char buf[64];
+};
+
+static int debugfs_htab_open(struct inode *inode, struct file *file)
+{
+ struct kvm *kvm = inode->i_private;
+ struct debugfs_htab_state *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ kvm_get_kvm(kvm);
+ p->kvm = kvm;
+ mutex_init(&p->mutex);
+ file->private_data = p;
+
+ return nonseekable_open(inode, file);
+}
+
+static int debugfs_htab_release(struct inode *inode, struct file *file)
+{
+ struct debugfs_htab_state *p = file->private_data;
+
+ kvm_put_kvm(p->kvm);
+ kfree(p);
+ return 0;
+}
+
+static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct debugfs_htab_state *p = file->private_data;
+ ssize_t ret, r;
+ unsigned long i, n;
+ unsigned long v, hr, gr;
+ struct kvm *kvm;
+ __be64 *hptp;
+
+ ret = mutex_lock_interruptible(&p->mutex);
+ if (ret)
+ return ret;
+
+ if (p->chars_left) {
+ n = p->chars_left;
+ if (n > len)
+ n = len;
+ r = copy_to_user(buf, p->buf + p->buf_index, n);
+ n -= r;
+ p->chars_left -= n;
+ p->buf_index += n;
+ buf += n;
+ len -= n;
+ ret = n;
+ if (r) {
+ if (!n)
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+ kvm = p->kvm;
+ i = p->hpt_index;
+ hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+ for (; len != 0 && i < kvm->arch.hpt_npte; ++i, hptp += 2) {
+ if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)))
+ continue;
+
+ /* lock the HPTE so it's stable and read it */
+ preempt_disable();
+ while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
+ cpu_relax();
+ v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK;
+ hr = be64_to_cpu(hptp[1]);
+ gr = kvm->arch.revmap[i].guest_rpte;
+ unlock_hpte(hptp, v);
+ preempt_enable();
+
+ if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
+ continue;
+
+ n = scnprintf(p->buf, sizeof(p->buf),
+ "%6lx %.16lx %.16lx %.16lx\n",
+ i, v, hr, gr);
+ p->chars_left = n;
+ if (n > len)
+ n = len;
+ r = copy_to_user(buf, p->buf, n);
+ n -= r;
+ p->chars_left -= n;
+ p->buf_index = n;
+ buf += n;
+ len -= n;
+ ret += n;
+ if (r) {
+ if (!ret)
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ p->hpt_index = i;
+
+ out:
+ mutex_unlock(&p->mutex);
+ return ret;
+}
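
Each entry is emitted as one fixed-format line: the HPT index, the two HPTE doublewords, and the guest's view of the entry from the revmap. If a read() ends mid-line, the chars_left/buf_index state carries the tail of that line into the next call, so consumers see an unbroken stream. An illustrative line (values hypothetical):

	   12f 8000000000000d96 c00000000badcafe 00000000deadbeef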
+
+static ssize_t debugfs_htab_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -EACCES;
+}
+
+static const struct file_operations debugfs_htab_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_htab_open,
+ .release = debugfs_htab_release,
+ .read = debugfs_htab_read,
+ .write = debugfs_htab_write,
+ .llseek = generic_file_llseek,
+};
+
+void kvmppc_mmu_debugfs_init(struct kvm *kvm)
+{
+ kvm->arch.htab_dentry = debugfs_create_file("htab", 0400,
+ kvm->arch.debugfs_dir, kvm,
+ &debugfs_htab_fops);
+}
+
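
kvmppc_mmu_debugfs_init() is presumably called from the VM-creation path that also creates kvm->arch.debugfs_dir, so each guest gets its own htab file. A hypothetical userspace sketch for dumping it; the "vm<pid>" directory name and the debugfs mount point are assumptions, not part of this diff:

	#include <stdio.h>

	int main(int argc, char **argv)
	{
		char path[256], line[128];
		FILE *f;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <vm-pid>\n", argv[0]);
			return 1;
		}
		/* assumed layout: /sys/kernel/debug/kvm/vm<pid>/htab */
		snprintf(path, sizeof(path),
			 "/sys/kernel/debug/kvm/vm%s/htab", argv[1]);
		f = fopen(path, "r");	/* file is mode 0400: root only */
		if (!f) {
			perror(path);
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* index, dword0, dword1, guest_rpte */
		fclose(f);
		return 0;
	}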
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
struct kvmppc_mmu *mmu = &vcpu->arch.mmu;