[Xen-devel] [PATCH v3 3/3] xen/privcmd: add IOCTL_PRIVCMD_RESTRICT



The purpose of this ioctl is to allow a user of privcmd to restrict its
operation such that it will no longer service arbitrary hypercalls via
IOCTL_PRIVCMD_HYPERCALL, and will check for a matching domid when
servicing IOCTL_PRIVCMD_DM_OP or any of the mapping ioctls. The aim is
to limit the attack surface available to a compromised device model.
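
For illustration, a device model would typically apply the restriction
straight after opening the device node, before it starts processing any
guest-controlled input. The sketch below is not part of the patch: the
device node path and the domid value are assumptions for the example,
the header include path may differ between installations, and domid_t
is a 16-bit integer in the Xen interface headers.

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>        /* IOCTL_PRIVCMD_RESTRICT */

int main(void)
{
        uint16_t domid = 5;     /* illustrative: the emulated guest's domid */
        int fd = open("/dev/xen/privcmd", O_RDWR | O_CLOEXEC);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /*
         * The restriction is one-way: it cannot be cleared, and a
         * second call with a different domid fails with EINVAL.
         */
        if (ioctl(fd, IOCTL_PRIVCMD_RESTRICT, &domid) < 0) {
                perror("IOCTL_PRIVCMD_RESTRICT");
                close(fd);
                return 1;
        }

        /*
         * From here on, IOCTL_PRIVCMD_HYPERCALL fails with EPERM, and
         * the mapping and dm_op ioctls only accept the restricted domid.
         */
        close(fd);
        return 0;
}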

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>

v3:
- Extend restriction to mapping ioctls

v2:
- Make sure that a restriction cannot be cleared
---
 drivers/xen/privcmd.c      | 88 +++++++++++++++++++++++++++++++++++++++++-----
 include/uapi/xen/privcmd.h |  2 ++
 2 files changed, 81 insertions(+), 9 deletions(-)

diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index a33f17e..f50d984 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -56,16 +56,25 @@ module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
 MODULE_PARM_DESC(dm_op_buf_max_size,
                 "Maximum size of a dm_op hypercall buffer");
 
+struct privcmd_data {
+       domid_t domid;
+};
+
 static int privcmd_vma_range_is_mapped(
                struct vm_area_struct *vma,
                unsigned long addr,
                unsigned long nr_pages);
 
-static long privcmd_ioctl_hypercall(void __user *udata)
+static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
 {
+       struct privcmd_data *data = file->private_data;
        struct privcmd_hypercall hypercall;
        long ret;
 
+       /* Disallow arbitrary hypercalls if restricted */
+       if (data->domid != DOMID_INVALID)
+               return -EPERM;
+
        if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
                return -EFAULT;
 
@@ -242,8 +251,9 @@ static int mmap_gfn_range(void *data, void *state)
        return 0;
 }
 
-static long privcmd_ioctl_mmap(void __user *udata)
+static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
 {
+       struct privcmd_data *data = file->private_data;
        struct privcmd_mmap mmapcmd;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
@@ -258,6 +268,10 @@ static long privcmd_ioctl_mmap(void __user *udata)
        if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                return -EFAULT;
 
+       /* If restriction is in place, check the domid matches */
+       if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
+               return -EPERM;
+
        rc = gather_array(&pagelist,
                          mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                          mmapcmd.entry);
@@ -429,8 +443,10 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
 
 static const struct vm_operations_struct privcmd_vm_ops;
 
-static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
+static long privcmd_ioctl_mmap_batch(
+       struct file *file, void __user *udata, int version)
 {
+       struct privcmd_data *data = file->private_data;
        int ret;
        struct privcmd_mmapbatch_v2 m;
        struct mm_struct *mm = current->mm;
@@ -459,6 +475,10 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
                return -EINVAL;
        }
 
+       /* If restriction is in place, check the domid matches */
+       if (data->domid != DOMID_INVALID && data->domid != m.dom)
+               return -EPERM;
+
        nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
        if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
                return -EINVAL;
@@ -603,8 +623,9 @@ static void unlock_pages(struct page *pages[], unsigned int nr_pages)
        }
 }
 
-static long privcmd_ioctl_dm_op(void __user *udata)
+static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
 {
+       struct privcmd_data *data = file->private_data;
        struct privcmd_dm_op kdata;
        struct privcmd_dm_op_buf *kbufs;
        unsigned int nr_pages = 0;
@@ -616,6 +637,10 @@ static long privcmd_ioctl_dm_op(void __user *udata)
        if (copy_from_user(&kdata, udata, sizeof(kdata)))
                return -EFAULT;
 
+       /* If restriction is in place, check the domid matches */
+       if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
+               return -EPERM;
+
        if (kdata.num == 0)
                return 0;
 
@@ -683,6 +708,23 @@ static long privcmd_ioctl_dm_op(void __user *udata)
        return rc;
 }
 
+static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
+{
+       struct privcmd_data *data = file->private_data;
+       domid_t dom;
+
+       if (copy_from_user(&dom, udata, sizeof(dom)))
+               return -EFAULT;
+
+       /* Set restriction to the specified domain, or check it matches */
+       if (data->domid == DOMID_INVALID)
+               data->domid = dom;
+       else if (data->domid != dom)
+               return -EINVAL;
+
+       return 0;
+}
+
 static long privcmd_ioctl(struct file *file,
                          unsigned int cmd, unsigned long data)
 {
@@ -691,23 +733,27 @@ static long privcmd_ioctl(struct file *file,
 
        switch (cmd) {
        case IOCTL_PRIVCMD_HYPERCALL:
-               ret = privcmd_ioctl_hypercall(udata);
+               ret = privcmd_ioctl_hypercall(file, udata);
                break;
 
        case IOCTL_PRIVCMD_MMAP:
-               ret = privcmd_ioctl_mmap(udata);
+               ret = privcmd_ioctl_mmap(file, udata);
                break;
 
        case IOCTL_PRIVCMD_MMAPBATCH:
-               ret = privcmd_ioctl_mmap_batch(udata, 1);
+               ret = privcmd_ioctl_mmap_batch(file, udata, 1);
                break;
 
        case IOCTL_PRIVCMD_MMAPBATCH_V2:
-               ret = privcmd_ioctl_mmap_batch(udata, 2);
+               ret = privcmd_ioctl_mmap_batch(file, udata, 2);
                break;
 
        case IOCTL_PRIVCMD_DM_OP:
-               ret = privcmd_ioctl_dm_op(udata);
+               ret = privcmd_ioctl_dm_op(file, udata);
+               break;
+
+       case IOCTL_PRIVCMD_RESTRICT:
+               ret = privcmd_ioctl_restrict(file, udata);
                break;
 
        default:
@@ -717,6 +763,28 @@ static long privcmd_ioctl(struct file *file,
        return ret;
 }
 
+static int privcmd_open(struct inode *ino, struct file *file)
+{
+       struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
+
+       if (!data)
+               return -ENOMEM;
+
+       /* DOMID_INVALID implies no restriction */
+       data->domid = DOMID_INVALID;
+
+       file->private_data = data;
+       return 0;
+}
+
+static int privcmd_release(struct inode *ino, struct file *file)
+{
+       struct privcmd_data *data = file->private_data;
+
+       kfree(data);
+       return 0;
+}
+
 static void privcmd_close(struct vm_area_struct *vma)
 {
        struct page **pages = vma->vm_private_data;
@@ -785,6 +853,8 @@ static int privcmd_vma_range_is_mapped(
 const struct file_operations xen_privcmd_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = privcmd_ioctl,
+       .open = privcmd_open,
+       .release = privcmd_release,
        .mmap = privcmd_mmap,
 };
 EXPORT_SYMBOL_GPL(xen_privcmd_fops);
diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
index f8c5d75..63ee95c 100644
--- a/include/uapi/xen/privcmd.h
+++ b/include/uapi/xen/privcmd.h
@@ -111,5 +111,7 @@ struct privcmd_dm_op {
        _IOC(_IOC_NONE, 'P', 4, sizeof(struct privcmd_mmapbatch_v2))
 #define IOCTL_PRIVCMD_DM_OP                                    \
        _IOC(_IOC_NONE, 'P', 5, sizeof(struct privcmd_dm_op))
+#define IOCTL_PRIVCMD_RESTRICT                                 \
+       _IOC(_IOC_NONE, 'P', 6, sizeof(domid_t))
 
 #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
-- 
2.1.4

