author		Julien Grall <julien.grall@citrix.com>		2015-05-05 18:54:12 +0300
committer	David Vrabel <david.vrabel@citrix.com>		2015-10-23 16:20:42 +0300
commit		5995a68a6272e4e8f4fe4de82cdc877e650fe8be (patch)
parent		d0089e8a0e4c9723d85b01713671358e3d6960df (diff)
xen/privcmd: Add support for Linux 64KB page granularity
The hypercall interface (as well as the toolstack) always uses 4KB page granularity. When the toolstack asks for a series of guest PFNs to be mapped in a batch, it expects the pages to be mapped contiguously in its virtual memory. When Linux uses 64KB page granularity, the privcmd driver therefore has to map multiple Xen PFNs into a single Linux page.

Note that this solution works for any Linux page granularity that is a multiple of 4KB.

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
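As a rough illustration of the arithmetic the patch relies on, here is a minimal user-space sketch. XEN_PAGE_SHIFT, XEN_PFN_PER_PAGE and DIV_ROUND_UP mirror the kernel's definitions; the standalone program, the 64KB PAGE_SHIFT and the sample batch size are assumptions for illustration only.

/* Sketch: how many 64KB Linux pages back a batch of 4KB Xen PFNs.
 * Constants mirror the kernel definitions; a 64KB-page build is assumed. */
#include <stdio.h>

#define XEN_PAGE_SHIFT   12                           /* Xen frames are always 4KB */
#define XEN_PAGE_SIZE    (1UL << XEN_PAGE_SHIFT)
#define PAGE_SHIFT       16                           /* assumed: 64KB Linux pages */
#define PAGE_SIZE        (1UL << PAGE_SHIFT)
#define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)  /* 16 Xen PFNs per Linux page */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long m_num = 100;	/* guest frames requested in one batch */

	/* Round up: a partially used Linux page still has to be mapped whole. */
	unsigned long nr_pages = DIV_ROUND_UP(m_num, XEN_PFN_PER_PAGE);

	printf("%lu Xen PFNs need %lu Linux page(s)\n", m_num, nr_pages);	/* 100 -> 7 */
	return 0;
}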
Diffstat (limited to 'drivers/xen/privcmd.c')
 drivers/xen/privcmd.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index b199ad3d4587..df2e6f783318 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -446,7 +446,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 		return -EINVAL;
 	}
 
-	nr_pages = m.num;
+	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
 	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
 		return -EINVAL;
 
@@ -494,7 +494,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 			goto out_unlock;
 		}
 		if (xen_feature(XENFEAT_auto_translated_physmap)) {
-			ret = alloc_empty_pages(vma, m.num);
+			ret = alloc_empty_pages(vma, nr_pages);
 			if (ret < 0)
 				goto out_unlock;
 		} else
@@ -518,6 +518,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 	state.global_error  = 0;
 	state.version       = version;
 
+	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
 	/* mmap_batch_fn guarantees ret == 0 */
 	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
 				    &pagelist, mmap_batch_fn, &state));
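The BUILD_BUG_ON added above encodes an invariant: traverse_pages_block() hands mmap_batch_fn the PFN list one PAGE_SIZE chunk at a time, so each chunk must cover a whole number of Linux pages or a 64KB page could be split across two calls. Below is a user-space restatement of the check; the 8-byte xen_pfn_t and the 64KB-page constants from the sketch above are assumptions.

/* Sketch of the invariant behind the BUILD_BUG_ON, checked at compile time.
 * Assumes 8-byte xen_pfn_t and a 64KB-page build, as in the sketch above. */
#include <assert.h>
#include <stdint.h>

typedef uint64_t xen_pfn_t;

#define PAGE_SIZE        65536UL	/* assumed: 64KB Linux pages */
#define XEN_PFN_PER_PAGE 16UL		/* 64KB / 4KB */

/* xen_pfn_t entries in one chunk handed to mmap_batch_fn: 8192. */
#define ENTRIES_PER_CHUNK (PAGE_SIZE / sizeof(xen_pfn_t))

/* 8192 % 16 == 0: every chunk describes whole Linux pages, so a 64KB
 * page is never split across two traverse_pages_block() callbacks. */
static_assert(ENTRIES_PER_CHUNK % XEN_PFN_PER_PAGE == 0,
	      "each chunk must cover whole Linux pages");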
@@ -582,12 +583,13 @@ static void privcmd_close(struct vm_area_struct *vma)
 {
 	struct page **pages = vma->vm_private_data;
 	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
 	int rc;
 
 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
 		return;
 
-	rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
+	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
 	if (rc == 0)
 		free_xenballooned_pages(numpgs, pages);
 	else
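privcmd_close() now counts in both units: free_xenballooned_pages() takes Linux pages, while xen_unmap_domain_gfn_range() takes Xen GFNs, which is why numpgs and numgfns coexist. A small sketch of the two conversions on one VMA range follows; the shift values and the addresses are assumptions, as before.

/* Sketch: one VMA length counted in both granularities.
 * The shift values are assumed, as in the first sketch. */
#include <stdio.h>

#define PAGE_SHIFT     16	/* assumed: 64KB Linux pages */
#define XEN_PAGE_SHIFT 12	/* 4KB Xen frames */

int main(void)
{
	unsigned long vm_start = 0x40000000UL;
	unsigned long vm_end   = vm_start + (4UL << PAGE_SHIFT);	/* 4-page VMA */

	int numpgs  = (vm_end - vm_start) >> PAGE_SHIFT;	/* 4: pages to free */
	int numgfns = (vm_end - vm_start) >> XEN_PAGE_SHIFT;	/* 64: GFNs to unmap */

	printf("numpgs=%d numgfns=%d\n", numpgs, numgfns);
	return 0;
}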