[Xen-devel] [PATCH-for-4.9 v1 4/8] dm_op: convert HVMOP_set_pci_intx_level, HVMOP_set_isa_irq_level, and HVMOP_set_pci_link_route

Suggested-by: Jan Beulich <jbeulich@xxxxxxxx>
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 tools/flask/policy/modules/xen.if   |   8 +--
 tools/libxc/xc_misc.c               |  81 +++++++--------------
 xen/arch/x86/hvm/dm.c               |  80 +++++++++++++++++++++
 xen/arch/x86/hvm/hvm.c              | 136 ------------------------------------
 xen/include/public/hvm/dm_op.h      |  42 +++++++++++
 xen/include/public/hvm/hvm_op.h     |   4 ++
 xen/include/xsm/dummy.h             |  18 -----
 xen/include/xsm/xsm.h               |  18 -----
 xen/xsm/dummy.c                     |   3 -
 xen/xsm/flask/hooks.c               |  15 ----
 xen/xsm/flask/policy/access_vectors |   6 --
 11 files changed, 154 insertions(+), 257 deletions(-)

diff --git a/tools/flask/policy/modules/xen.if b/tools/flask/policy/modules/xen.if
index 366273e..e6dfaf0 100644
--- a/tools/flask/policy/modules/xen.if
+++ b/tools/flask/policy/modules/xen.if
@@ -57,8 +57,8 @@ define(`create_domain_common', `
        allow $1 $2:shadow enable;
        allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op updatemp };
        allow $1 $2:grant setup;
-       allow $1 $2:hvm { cacheattr getparam hvmctl irqlevel pciroute sethvmc
-                       setparam pcilevel nested altp2mhvm altp2mhvm_op };
+       allow $1 $2:hvm { cacheattr getparam hvmctl sethvmc
+                       setparam nested altp2mhvm altp2mhvm_op };
 ')
 
 # create_domain(priv, target)
@@ -93,7 +93,7 @@ define(`manage_domain', `
 #   (inbound migration is the same as domain creation)
 define(`migrate_domain_out', `
        allow $1 domxen_t:mmu map_read;
-       allow $1 $2:hvm { gethvmc getparam irqlevel };
+       allow $1 $2:hvm { gethvmc getparam };
        allow $1 $2:mmu { stat pageinfo map_read };
        allow $1 $2:domain { getaddrsize getvcpucontext pause destroy };
        allow $1 $2:domain2 gettsc;
@@ -151,7 +151,7 @@ define(`device_model', `
 
        allow $1 $2_target:domain { getdomaininfo shutdown };
        allow $1 $2_target:mmu { map_read map_write adjust physmap target_hack };
-       allow $1 $2_target:hvm { getparam setparam hvmctl irqlevel pciroute pcilevel cacheattr send_irq dm };
+       allow $1 $2_target:hvm { getparam setparam hvmctl cacheattr send_irq dm };
 ')
 
 # make_device_model(priv, dm_dom, hvm_dom)
diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
index 3651cab..842b699 100644
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -473,30 +473,19 @@ int xc_hvm_set_pci_intx_level(
     uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
     unsigned int level)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_pci_intx_level, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_set_pci_intx_level hypercall");
-        return -1;
-    }
-
-    arg->domid  = dom;
-    arg->domain = domain;
-    arg->bus    = bus;
-    arg->device = device;
-    arg->intx   = intx;
-    arg->level  = level;
+    struct xen_dm_op op;
+    struct xen_dm_op_set_pci_intx_level *data;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_pci_intx_level,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    op.op = DMOP_set_pci_intx_level;
+    data = &op.u.set_pci_intx_level;
 
-    xc_hypercall_buffer_free(xch, arg);
+    data->domain = domain;
+    data->bus = bus;
+    data->device = device;
+    data->intx = intx;
+    data->level = level;
 
-    return rc;
+    return do_dm_op(xch, dom, 1, &op, sizeof(op));
 }
 
 int xc_hvm_set_isa_irq_level(
@@ -504,53 +493,31 @@ int xc_hvm_set_isa_irq_level(
     uint8_t isa_irq,
     unsigned int level)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_isa_irq_level, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_set_isa_irq_level hypercall");
-        return -1;
-    }
+    struct xen_dm_op op;
+    struct xen_dm_op_set_isa_irq_level *data;
 
-    arg->domid   = dom;
-    arg->isa_irq = isa_irq;
-    arg->level   = level;
+    op.op = DMOP_set_isa_irq_level;
+    data = &op.u.set_isa_irq_level;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_isa_irq_level,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
+    data->isa_irq = isa_irq;
+    data->level = level;
 
-    return rc;
+    return do_dm_op(xch, dom, 1, &op, sizeof(op));
 }
 
 int xc_hvm_set_pci_link_route(
     xc_interface *xch, domid_t dom, uint8_t link, uint8_t isa_irq)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_pci_link_route, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_set_pci_link_route hypercall");
-        return -1;
-    }
+    struct xen_dm_op op;
+    struct xen_dm_op_set_pci_link_route *data;
 
-    arg->domid   = dom;
-    arg->link    = link;
-    arg->isa_irq = isa_irq;
+    op.op = DMOP_set_pci_link_route;
+    data = &op.u.set_pci_link_route;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_pci_link_route,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
+    data->link = link;
+    data->isa_irq = isa_irq;
 
-    return rc;
+    return do_dm_op(xch, dom, 1, &op, sizeof(op));
 }
 
 int xc_hvm_inject_msi(
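
The callers of these wrappers need no changes: the function signatures stay the same and only the marshalling underneath moves from per-op hypercall buffers to a single xen_dm_op. As a caller-side illustration, here is a minimal sketch of a device model driving the three wrappers; the domid, device, link and IRQ numbers are purely illustrative, and a privileged xc_interface handle is assumed.

#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    domid_t domid = 1;          /* hypothetical HVM guest */

    if ( !xch )
    {
        perror("xc_interface_open");
        return EXIT_FAILURE;
    }

    /* Route PCI link 0 to ISA IRQ 10, then assert and deassert INTA#
     * of device 3 (domain 0, bus 0). */
    if ( xc_hvm_set_pci_link_route(xch, domid, 0, 10) ||
         xc_hvm_set_pci_intx_level(xch, domid, 0, 0, 3, 0, 1) ||
         xc_hvm_set_pci_intx_level(xch, domid, 0, 0, 3, 0, 0) )
        perror("PCI INTx dm_op");

    /* Assert and then deassert ISA IRQ 5. */
    if ( xc_hvm_set_isa_irq_level(xch, domid, 5, 1) ||
         xc_hvm_set_isa_irq_level(xch, domid, 5, 0) )
        perror("ISA IRQ dm_op");

    xc_interface_close(xch);
    return EXIT_SUCCESS;
}
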
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 78dd6e7..b8edf2c 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -105,6 +105,60 @@ static int dm_op_track_dirty_vram(struct domain *d,
         hap_track_dirty_vram(d, first_pfn, nr, buf.h);
 }
 
+static int dm_op_set_pci_intx_level(struct domain *d, uint8_t domain,
+                                    uint8_t bus, uint8_t device,
+                                    uint8_t intx, uint8_t level)
+{
+    if ( domain != 0 || bus != 0 || device > 0x1f || intx > 3 )
+        return -EINVAL;
+
+    switch ( level )
+    {
+    case 0:
+        hvm_pci_intx_deassert(d, device, intx);
+        break;
+    case 1:
+        hvm_pci_intx_assert(d, device, intx);
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static int dm_op_set_isa_irq_level(struct domain *d, uint8_t isa_irq,
+                                   uint8_t level)
+{
+    if ( isa_irq > 15 )
+        return -EINVAL;
+
+    switch ( level )
+    {
+    case 0:
+        hvm_isa_irq_deassert(d, isa_irq);
+        break;
+    case 1:
+        hvm_isa_irq_assert(d, isa_irq);
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static int dm_op_set_pci_link_route(struct domain *d, uint8_t link,
+                                    uint8_t isa_irq)
+{
+    if ( link > 3 || isa_irq > 15 )
+        return -EINVAL;
+
+    hvm_set_pci_link_route(d, link, isa_irq);
+
+    return 0;
+}
+
 long do_dm_op(domid_t domid,
               unsigned int nr_bufs,
               XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs)
@@ -197,6 +251,32 @@ long do_dm_op(domid_t domid,
                                     data->nr);
         break;
     }
+    case DMOP_set_pci_intx_level:
+    {
+        struct xen_dm_op_set_pci_intx_level *data =
+            &op.u.set_pci_intx_level;
+
+        rc = dm_op_set_pci_intx_level(d, data->domain, data->bus,
+                                      data->device, data->intx,
+                                      data->level);
+        break;
+    }
+    case DMOP_set_isa_irq_level:
+    {
+        struct xen_dm_op_set_isa_irq_level *data =
+            &op.u.set_isa_irq_level;
+
+        rc = dm_op_set_isa_irq_level(d, data->isa_irq, data->level);
+        break;
+    }
+    case DMOP_set_pci_link_route:
+    {
+        struct xen_dm_op_set_pci_link_route *data =
+            &op.u.set_pci_link_route;
+
+        rc = dm_op_set_pci_link_route(d, data->link, data->isa_irq);
+        break;
+    }
     default:
         rc = -EOPNOTSUPP;
         break;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0ca9ca0..14d3b87 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4455,50 +4455,6 @@ void hvm_hypercall_page_initialise(struct domain *d,
     hvm_funcs.init_hypercall_page(d, hypercall_page);
 }
 
-static int hvmop_set_pci_intx_level(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_pci_intx_level_t) uop)
-{
-    struct xen_hvm_set_pci_intx_level op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) )
-        return -EINVAL;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_set_pci_intx_level(XSM_DM_PRIV, d);
-    if ( rc )
-        goto out;
-
-    rc = 0;
-    switch ( op.level )
-    {
-    case 0:
-        hvm_pci_intx_deassert(d, op.device, op.intx);
-        break;
-    case 1:
-        hvm_pci_intx_assert(d, op.device, op.intx);
-        break;
-    default:
-        rc = -EINVAL;
-        break;
-    }
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
 {
     struct domain *d = v->domain;
@@ -4642,83 +4598,6 @@ static void hvm_s3_resume(struct domain *d)
     }
 }
 
-static int hvmop_set_isa_irq_level(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_isa_irq_level_t) uop)
-{
-    struct xen_hvm_set_isa_irq_level op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    if ( op.isa_irq > 15 )
-        return -EINVAL;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_set_isa_irq_level(XSM_DM_PRIV, d);
-    if ( rc )
-        goto out;
-
-    rc = 0;
-    switch ( op.level )
-    {
-    case 0:
-        hvm_isa_irq_deassert(d, op.isa_irq);
-        break;
-    case 1:
-        hvm_isa_irq_assert(d, op.isa_irq);
-        break;
-    default:
-        rc = -EINVAL;
-        break;
-    }
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_set_pci_link_route(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_pci_link_route_t) uop)
-{
-    struct xen_hvm_set_pci_link_route op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    if ( (op.link > 3) || (op.isa_irq > 15) )
-        return -EINVAL;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_set_pci_link_route(XSM_DM_PRIV, d);
-    if ( rc )
-        goto out;
-
-    rc = 0;
-    hvm_set_pci_link_route(d, op.link, op.isa_irq);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 static int hvmop_inject_msi(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_inject_msi_t) uop)
 {
@@ -5513,26 +5392,11 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             guest_handle_cast(arg, xen_hvm_param_t));
         break;
 
-    case HVMOP_set_pci_intx_level:
-        rc = hvmop_set_pci_intx_level(
-            guest_handle_cast(arg, xen_hvm_set_pci_intx_level_t));
-        break;
-
-    case HVMOP_set_isa_irq_level:
-        rc = hvmop_set_isa_irq_level(
-            guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
-        break;
-
     case HVMOP_inject_msi:
         rc = hvmop_inject_msi(
             guest_handle_cast(arg, xen_hvm_inject_msi_t));
         break;
 
-    case HVMOP_set_pci_link_route:
-        rc = hvmop_set_pci_link_route(
-            guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
-        break;
-
     case HVMOP_flush_tlbs:
         rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -EINVAL;
         break;
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index c1557eb..b47014b 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -194,6 +194,45 @@ struct xen_dm_op_track_dirty_vram {
     uint64_aligned_t first_pfn;
 };
 
+/*
+ * DMOP_set_pci_intx_level: Set the logical level of one of a domain's
+ *                          PCI INTx pins.
+ */
+#define DMOP_set_pci_intx_level 8
+
+struct xen_dm_op_set_pci_intx_level {
+    /* IN - PCI INTx identification (domain:bus:device:intx) */
+    uint8_t  domain, bus, device, intx;
+    /* IN - Level: 0 -> deasserted, 1 -> asserted */
+    uint8_t  level;
+};
+
+/*
+ * DMOP_set_isa_irq_level: Set the logical level of one of a domain's
+ *                         ISA IRQ lines.
+ */
+#define DMOP_set_isa_irq_level 9
+
+struct xen_dm_op_set_isa_irq_level {
+    /* IN - ISA IRQ (0-15) */
+    uint8_t  isa_irq;
+    /* IN - Level: 0 -> deasserted, 1 -> asserted */
+    uint8_t  level;
+};
+
+/*
+ * DMOP_set_pci_link_route: Map a PCI INTx line to an IRQ line.
+ */
+#define DMOP_set_pci_link_route 10
+
+struct xen_dm_op_set_pci_link_route {
+    /* PCI INTx line (0-3) */
+    uint8_t  link;
+    /* ISA IRQ (1-15) or 0 -> disable link */
+    uint8_t  isa_irq;
+};
+
+
 struct xen_dm_op {
     uint32_t op;
     union {
@@ -204,6 +243,9 @@ struct xen_dm_op {
         struct xen_dm_op_set_ioreq_server_state set_ioreq_server_state;
         struct xen_dm_op_destroy_ioreq_server destroy_ioreq_server;
         struct xen_dm_op_track_dirty_vram track_dirty_vram;
+        struct xen_dm_op_set_pci_intx_level set_pci_intx_level;
+        struct xen_dm_op_set_isa_irq_level set_isa_irq_level;
+        struct xen_dm_op_set_pci_link_route set_pci_link_route;
     } u;
 };
 
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 1bb5221..1b9e3e0 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -39,6 +39,8 @@ struct xen_hvm_param {
 typedef struct xen_hvm_param xen_hvm_param_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
 
+#if __XEN_INTERFACE_VERSION__ < 0x00040900
+
 /* Set the logical level of one of a domain's PCI INTx wires. */
 #define HVMOP_set_pci_intx_level  2
 struct xen_hvm_set_pci_intx_level {
@@ -77,6 +79,8 @@ struct xen_hvm_set_pci_link_route {
 typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
 
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040900 */
+
 /* Flushes all VCPU TLBs: @arg must be NULL. */
 #define HVMOP_flush_tlbs          5
 
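
With this guard, the legacy HVMOP_set_* definitions disappear for consumers built against interface version 4.9 or later. A consumer that still needs the old structures can pin an earlier interface version before including the public headers; a minimal sketch follows, where the exact version value (4.8 here) is only illustrative, since any value below 0x00040900 keeps the definitions visible.

/* Pin a pre-4.9 interface version so the retired HVMOP_set_pci_intx_level,
 * HVMOP_set_isa_irq_level and HVMOP_set_pci_link_route definitions remain
 * available from the public headers. */
#define __XEN_INTERFACE_VERSION__ 0x00040800

#include <xen/xen.h>
#include <xen/hvm/hvm_op.h>

/* struct xen_hvm_set_pci_intx_level et al. are still defined here. */
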
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index b7d3173..47c6072 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -610,24 +610,6 @@ static XSM_INLINE int xsm_shadow_control(XSM_DEFAULT_ARG struct domain *d, uint3
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_hvm_set_pci_intx_level(XSM_DEFAULT_ARG struct domain *d)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
-static XSM_INLINE int xsm_hvm_set_isa_irq_level(XSM_DEFAULT_ARG struct domain *d)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
-static XSM_INLINE int xsm_hvm_set_pci_link_route(XSM_DEFAULT_ARG struct domain *d)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_hvm_inject_msi(XSM_DEFAULT_ARG struct domain *d)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 0bcde39..cb32644 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -162,9 +162,6 @@ struct xsm_operations {
 #ifdef CONFIG_X86
     int (*do_mca) (void);
     int (*shadow_control) (struct domain *d, uint32_t op);
-    int (*hvm_set_pci_intx_level) (struct domain *d);
-    int (*hvm_set_isa_irq_level) (struct domain *d);
-    int (*hvm_set_pci_link_route) (struct domain *d);
     int (*hvm_inject_msi) (struct domain *d);
     int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
     int (*apic) (struct domain *d, int cmd);
@@ -635,21 +632,6 @@ static inline int xsm_shadow_control (xsm_default_t def, struct domain *d, uint3
     return xsm_ops->shadow_control(d, op);
 }
 
-static inline int xsm_hvm_set_pci_intx_level (xsm_default_t def, struct domain *d)
-{
-    return xsm_ops->hvm_set_pci_intx_level(d);
-}
-
-static inline int xsm_hvm_set_isa_irq_level (xsm_default_t def, struct domain *d)
-{
-    return xsm_ops->hvm_set_isa_irq_level(d);
-}
-
-static inline int xsm_hvm_set_pci_link_route (xsm_default_t def, struct domain *d)
-{
-    return xsm_ops->hvm_set_pci_link_route(d);
-}
-
 static inline int xsm_hvm_inject_msi (xsm_default_t def, struct domain *d)
 {
     return xsm_ops->hvm_inject_msi(d);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index d544ec1..f1568dd 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -145,9 +145,6 @@ void __init xsm_fixup_ops (struct xsm_operations *ops)
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
     set_to_dummy_if_null(ops, shadow_control);
-    set_to_dummy_if_null(ops, hvm_set_pci_intx_level);
-    set_to_dummy_if_null(ops, hvm_set_isa_irq_level);
-    set_to_dummy_if_null(ops, hvm_set_pci_link_route);
     set_to_dummy_if_null(ops, hvm_inject_msi);
     set_to_dummy_if_null(ops, mem_sharing_op);
     set_to_dummy_if_null(ops, apic);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 7972546..088aa87 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1499,21 +1499,6 @@ static int flask_ioport_mapping(struct domain *d, uint32_t start, uint32_t end,
     return flask_ioport_permission(d, start, end, access);
 }
 
-static int flask_hvm_set_pci_intx_level(struct domain *d)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__PCILEVEL);
-}
-
-static int flask_hvm_set_isa_irq_level(struct domain *d)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__IRQLEVEL);
-}
-
-static int flask_hvm_set_pci_link_route(struct domain *d)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__PCIROUTE);
-}
-
 static int flask_hvm_inject_msi(struct domain *d)
 {
     return current_has_perm(d, SECCLASS_HVM, HVM__SEND_IRQ);
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 5af427f..708cfe6 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -257,12 +257,6 @@ class hvm
     setparam
 # HVMOP_get_param
     getparam
-# HVMOP_set_pci_intx_level (also needs hvmctl)
-    pcilevel
-# HVMOP_set_isa_irq_level
-    irqlevel
-# HVMOP_set_pci_link_route
-    pciroute
     bind_irq
 # XEN_DOMCTL_pin_mem_cacheattr
     cacheattr
-- 
2.1.4

