author		Paul Durrant <paul.durrant@citrix.com>		2017-02-13 20:03:23 +0300
committer	Boris Ostrovsky <boris.ostrovsky@oracle.com>	2017-02-14 23:13:43 +0300
commit		ab520be8cd5d56867fc95cfbc34b90880faf1f9d (patch)
tree		4c432a918ffbd6308f752eaac36b322b811582b1 /drivers/xen/privcmd.c
parent		dc9eab6fd94dd26340749321bba2c58634761516 (diff)
download	linux-ab520be8cd5d56867fc95cfbc34b90880faf1f9d.tar.xz
xen/privcmd: Add IOCTL_PRIVCMD_DM_OP
Recently a new dm_op[1] hypercall was added to Xen to provide a mechanism
for restricting device emulators (such as QEMU) to a limited set of
hypervisor operations, and to allow those operations to be audited in the
kernel of the domain in which they run.

This patch adds IOCTL_PRIVCMD_DM_OP as a gateway for __HYPERVISOR_dm_op.

NOTE: There is no requirement for user-space code to bounce data through
locked memory buffers (as there is with IOCTL_PRIVCMD_HYPERCALL) since
privcmd has enough information to lock the original buffers directly.

[1] http://xenbits.xen.org/gitweb/?p=xen.git;a=commit;h=524a98c2

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
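
For illustration, a minimal user-space sketch of driving the new ioctl follows. It is only a sketch: it assumes the uapi definitions that accompany this change (struct privcmd_dm_op, struct privcmd_dm_op_buf, IOCTL_PRIVCMD_DM_OP) are reachable as <xen/privcmd.h>, and issue_dm_op(), payload and size are illustrative names rather than anything added by the patch; a real device emulator would normally go through the Xen toolstack libraries instead of the raw ioctl. As the NOTE above says, the payload can live in ordinary heap memory, since privcmd pins the backing pages itself.

/*
 * Sketch only: issue a single-buffer dm_op for 'domid' through
 * IOCTL_PRIVCMD_DM_OP. Assumes the kernel uapi header is installed
 * as <xen/privcmd.h>; error handling is abbreviated.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/privcmd.h>

static int issue_dm_op(uint16_t domid, void *payload, size_t size)
{
	/*
	 * The payload may be ordinary heap memory: privcmd pins the
	 * backing pages around the hypercall, so no mlock() or bounce
	 * buffer (as with IOCTL_PRIVCMD_HYPERCALL) is required.
	 */
	struct privcmd_dm_op_buf buf = {
		.uptr = payload,
		.size = size,
	};
	struct privcmd_dm_op op = {
		.dom   = domid,		/* target domain */
		.num   = 1,		/* entries in ubufs[] */
		.ubufs = &buf,
	};
	int fd, rc;

	fd = open("/dev/xen/privcmd", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return -1;

	rc = ioctl(fd, IOCTL_PRIVCMD_DM_OP, &op);	/* 0 on success */
	close(fd);
	return rc;
}
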
Diffstat (limited to 'drivers/xen/privcmd.c')
-rw-r--r--	drivers/xen/privcmd.c	139
1 file changed, 139 insertions, 0 deletions
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 5e5c7aef0c9f..1a6f1860e008 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -22,6 +22,7 @@
 #include <linux/pagemap.h>
 #include <linux/seq_file.h>
 #include <linux/miscdevice.h>
+#include <linux/moduleparam.h>
 
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
@@ -32,6 +33,7 @@
 #include <xen/xen.h>
 #include <xen/privcmd.h>
 #include <xen/interface/xen.h>
+#include <xen/interface/hvm/dm_op.h>
 #include <xen/features.h>
 #include <xen/page.h>
 #include <xen/xen-ops.h>
@@ -43,6 +45,17 @@ MODULE_LICENSE("GPL");
 
 #define PRIV_VMA_LOCKED ((void *)1)
 
+static unsigned int privcmd_dm_op_max_num = 16;
+module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
+MODULE_PARM_DESC(dm_op_max_nr_bufs,
+		 "Maximum number of buffers per dm_op hypercall");
+
+static unsigned int privcmd_dm_op_buf_max_size = 4096;
+module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
+		   0644);
+MODULE_PARM_DESC(dm_op_buf_max_size,
+		 "Maximum size of a dm_op hypercall buffer");
+
 static int privcmd_vma_range_is_mapped(
 	       struct vm_area_struct *vma,
 	       unsigned long addr,
@@ -548,6 +561,128 @@ out_unlock:
 	goto out;
 }
 
+static int lock_pages(
+	struct privcmd_dm_op_buf kbufs[], unsigned int num,
+	struct page *pages[], unsigned int nr_pages)
+{
+	unsigned int i;
+
+	for (i = 0; i < num; i++) {
+		unsigned int requested;
+		int pinned;
+
+		requested = DIV_ROUND_UP(
+			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
+			PAGE_SIZE);
+		if (requested > nr_pages)
+			return -ENOSPC;
+
+		pinned = get_user_pages_fast(
+			(unsigned long) kbufs[i].uptr,
+			requested, FOLL_WRITE, pages);
+		if (pinned < 0)
+			return pinned;
+
+		nr_pages -= pinned;
+		pages += pinned;
+	}
+
+	return 0;
+}
+
+static void unlock_pages(struct page *pages[], unsigned int nr_pages)
+{
+	unsigned int i;
+
+	if (!pages)
+		return;
+
+	for (i = 0; i < nr_pages; i++) {
+		if (pages[i])
+			put_page(pages[i]);
+	}
+}
+
+static long privcmd_ioctl_dm_op(void __user *udata)
+{
+	struct privcmd_dm_op kdata;
+	struct privcmd_dm_op_buf *kbufs;
+	unsigned int nr_pages = 0;
+	struct page **pages = NULL;
+	struct xen_dm_op_buf *xbufs = NULL;
+	unsigned int i;
+	long rc;
+
+	if (copy_from_user(&kdata, udata, sizeof(kdata)))
+		return -EFAULT;
+
+	if (kdata.num == 0)
+		return 0;
+
+	if (kdata.num > privcmd_dm_op_max_num)
+		return -E2BIG;
+
+	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
+	if (!kbufs)
+		return -ENOMEM;
+
+	if (copy_from_user(kbufs, kdata.ubufs,
+			   sizeof(*kbufs) * kdata.num)) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	for (i = 0; i < kdata.num; i++) {
+		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
+			rc = -E2BIG;
+			goto out;
+		}
+
+		if (!access_ok(VERIFY_WRITE, kbufs[i].uptr,
+			       kbufs[i].size)) {
+			rc = -EFAULT;
+			goto out;
+		}
+
+		nr_pages += DIV_ROUND_UP(
+			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
+			PAGE_SIZE);
+	}
+
+	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+	if (!pages) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
+	if (!xbufs) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
+	if (rc)
+		goto out;
+
+	for (i = 0; i < kdata.num; i++) {
+		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
+		xbufs[i].size = kbufs[i].size;
+	}
+
+	xen_preemptible_hcall_begin();
+	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
+	xen_preemptible_hcall_end();
+
+out:
+	unlock_pages(pages, nr_pages);
+	kfree(xbufs);
+	kfree(pages);
+	kfree(kbufs);
+
+	return rc;
+}
+
 static long privcmd_ioctl(struct file *file,
 			  unsigned int cmd, unsigned long data)
 {
@@ -571,6 +706,10 @@ static long privcmd_ioctl(struct file *file,
 		ret = privcmd_ioctl_mmap_batch(udata, 2);
 		break;
 
+	case IOCTL_PRIVCMD_DM_OP:
+		ret = privcmd_ioctl_dm_op(udata);
+		break;
+
 	default:
 		break;
 	}
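
A closing usage note on the new limits: a request is rejected with -E2BIG if it carries more than dm_op_max_nr_bufs buffers (default 16) or if any single buffer exceeds dm_op_buf_max_size bytes (default 4096), and lock_pages() pins DIV_ROUND_UP(offset_in_page(uptr) + size, PAGE_SIZE) pages per buffer. The helper below is a hypothetical user-space mirror of that arithmetic, assuming 4 KiB pages and the default limits; none of these names appear in the patch.

#include <stddef.h>
#include <stdint.h>

#define DM_OP_MAX_NR_BUFS	16	/* default of dm_op_max_nr_bufs */
#define DM_OP_BUF_MAX_SIZE	4096	/* default of dm_op_buf_max_size */
#define ASSUMED_PAGE_SIZE	4096	/* assumes 4 KiB pages */

struct dm_op_buf_desc {			/* user-space mirror of */
	void *uptr;			/* struct privcmd_dm_op_buf */
	size_t size;
};

/*
 * Return the number of pages privcmd would pin for this buffer list,
 * or -1 if one of the driver's limit checks would fail with -E2BIG.
 */
static long dm_op_pages_to_pin(const struct dm_op_buf_desc *bufs,
			       unsigned int num)
{
	unsigned long nr_pages = 0;
	unsigned int i;

	if (num > DM_OP_MAX_NR_BUFS)
		return -1;

	for (i = 0; i < num; i++) {
		size_t off = (uintptr_t)bufs[i].uptr % ASSUMED_PAGE_SIZE;

		if (bufs[i].size > DM_OP_BUF_MAX_SIZE)
			return -1;

		/* DIV_ROUND_UP(offset_in_page(uptr) + size, PAGE_SIZE) */
		nr_pages += (off + bufs[i].size + ASSUMED_PAGE_SIZE - 1) /
			    ASSUMED_PAGE_SIZE;
	}

	return (long)nr_pages;
}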