[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v3 3/7] x86/hvm: Support extended destination IDs in virtual MSI and IO-APIC



Use the newly defined masks to extract the full 15-bit destination ID
from guest MSI addresses and IO-APIC RTEs. In hvm_inject_msi() combine
the standard bits [19:12] with the extended bits [11:5] of the MSI
address into a 15-bit destination ID for LAPIC delivery. Increase the
dest parameter of vmsi_deliver() and hvm_girq_dest_2_vcpu_id() from
uint8_t to uint32_t. In vmsi_deliver_pirq() extract the full destination
from gflags via XEN_DOMCTL_VMSI_X86_FULL_DEST(). In msi_gflags() pack
the extended bits from the MSI address into the new
XEN_DOMCTL_VMSI_X86_EXT_DEST_ID_MASK field of gflags. In
vioapic_deliver() read the combined 15-bit destination using the
VIOAPIC_RTE_DEST() macro. Extend ioapic_check() to check for extended
destination bits set in a domain that does not advertise
XEN_HVM_CPUID_EXT_DEST_ID and refuse to restore the IO-APIC state,
preventing silent interrupt misrouting after live migration.

Signed-off-by: Julian Vetter <julian.vetter@xxxxxxxxxx>
---
Changes in V3:
- Added an additional check to ioapic_check() that ensures the extended
  destination ID bits are not set for domains that don't advertise the
  feature
- Addressed Jan's comments: replaced all open-coded constants with proper
  defines
---
 xen/arch/x86/hvm/irq.c             | 11 ++++++++++-
 xen/arch/x86/hvm/vioapic.c         | 21 +++++++++++++++++++--
 xen/arch/x86/hvm/vmsi.c            |  8 +++++---
 xen/arch/x86/include/asm/hvm/hvm.h |  4 ++--
 4 files changed, 36 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 5f64361113..b520fc1150 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -374,7 +374,16 @@ int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
 int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
 {
     uint32_t tmp = (uint32_t) addr;
-    uint8_t  dest = (tmp & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+    /*
+     * Standard MSI destination address bits 19:12 (8 bits).
+     * Extended MSI destination address bits 11:5 (7 more bits).
+     *
+     * As XEN_HVM_CPUID_EXT_DEST_ID is advertised, the guest may use bits 11:5
+     * for high destination ID bits, expanding to 15 bits total. Guests unaware
+     * of this feature set these bits to 0, so this is backwards-compatible.
+     */
+    uint32_t dest = (MASK_EXTR(tmp, MSI_ADDR_EXT_DEST_ID_MASK) << MSI_ADDR_DEST_ID_BITS) |
+                     MASK_EXTR(tmp, MSI_ADDR_DEST_ID_MASK);
     uint8_t  dest_mode = !!(tmp & MSI_ADDR_DESTMODE_MASK);
     uint8_t  delivery_mode = (data & MSI_DATA_DELIVERY_MODE_MASK)
         >> MSI_DATA_DELIVERY_MODE_SHIFT;
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index d7a4105a57..9602572dc4 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -411,7 +411,9 @@ static void ioapic_inj_irq(
 
 static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
 {
-    uint16_t dest = vioapic->redirtbl[pin].fields.dest_id;
+    uint32_t dest = ((uint32_t)vioapic->redirtbl[pin].fields.ext_dest_id <<
+                     VIOAPIC_RTE_DEST_ID_UPPER_BITS) |
+                    vioapic->redirtbl[pin].fields.dest_id;
     uint8_t dest_mode = vioapic->redirtbl[pin].fields.dest_mode;
     uint8_t delivery_mode = vioapic->redirtbl[pin].fields.delivery_mode;
     uint8_t vector = vioapic->redirtbl[pin].fields.vector;
@@ -618,6 +620,21 @@ static int cf_check ioapic_check(const struct domain *d, hvm_domain_context_t *h
              e->fields.reserved[0] || e->fields.reserved[1] ||
              e->fields.reserved[2] || e->fields.reserved2 )
             return -EINVAL;
+
+        /*
+         * If an RTE in the saved state has extended destination ID bits
+         * set, verify that the destination Xen has extended destination
+         * ID support enabled; otherwise interrupt routing to APIC IDs
+         * > 255 would be silently broken after restore.
+         */
+        if ( e->fields.ext_dest_id && !d->arch.hvm.ext_dest_id_enabled )
+        {
+            printk(XENLOG_G_ERR "HVM restore: %pd IO-APIC RTE %u has "
+                                "extended destination ID bits set but "
+                                "XEN_HVM_CPUID_EXT_DEST_ID is not enabled\n",
+                                d, i);
+            return -EINVAL;
+        }
     }
 
     return 0;
@@ -659,7 +676,7 @@ static int cf_check ioapic_load(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, NULL, ioapic_load, 1,
+HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_check, ioapic_load, 1,
                           HVMSR_PER_DOM);
 
 void vioapic_reset(struct domain *d)
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index 27b1f089e2..36ea898ac7 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -66,7 +66,7 @@ static void vmsi_inj_irq(
 
 int vmsi_deliver(
     struct domain *d, int vector,
-    uint8_t dest, uint8_t dest_mode,
+    uint32_t dest, uint8_t dest_mode,
     uint8_t delivery_mode, uint8_t trig_mode)
 {
     struct vlapic *target;
@@ -109,7 +109,7 @@ void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
 {
     uint32_t flags = pirq_dpci->gmsi.gflags;
     int vector = pirq_dpci->gmsi.gvec;
-    uint8_t dest = (uint8_t)flags;
+    uint32_t dest = XEN_DOMCTL_VMSI_X86_FULL_DEST(flags);
     bool dest_mode = flags & XEN_DOMCTL_VMSI_X86_DM_MASK;
     uint8_t delivery_mode = MASK_EXTR(flags, XEN_DOMCTL_VMSI_X86_DELIV_MASK);
     bool trig_mode = flags & XEN_DOMCTL_VMSI_X86_TRIG_MASK;
@@ -125,7 +125,7 @@ void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
 }
 
 /* Return value, -1 : multi-dests, non-negative value: dest_vcpu_id */
-int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode)
+int hvm_girq_dest_2_vcpu_id(struct domain *d, uint32_t dest, uint8_t dest_mode)
 {
     int dest_vcpu_id = -1, w = 0;
     struct vcpu *v;
@@ -802,6 +802,8 @@ static unsigned int msi_gflags(uint16_t data, uint64_t addr, bool masked)
      */
     return MASK_INSR(MASK_EXTR(addr, MSI_ADDR_DEST_ID_MASK),
                      XEN_DOMCTL_VMSI_X86_DEST_ID_MASK) |
+           MASK_INSR(MASK_EXTR(addr, MSI_ADDR_EXT_DEST_ID_MASK),
+                     XEN_DOMCTL_VMSI_X86_EXT_DEST_ID_MASK) |
            MASK_INSR(MASK_EXTR(addr, MSI_ADDR_REDIRECTION_MASK),
                      XEN_DOMCTL_VMSI_X86_RH_MASK) |
            MASK_INSR(MASK_EXTR(addr, MSI_ADDR_DESTMODE_MASK),
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index 7d9774df59..11256d5e67 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -295,11 +295,11 @@ uint64_t hvm_get_guest_time_fixed(const struct vcpu *v, uint64_t at_tsc);
 
 int vmsi_deliver(
     struct domain *d, int vector,
-    uint8_t dest, uint8_t dest_mode,
+    uint32_t dest, uint8_t dest_mode,
     uint8_t delivery_mode, uint8_t trig_mode);
 struct hvm_pirq_dpci;
 void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci);
-int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
+int hvm_girq_dest_2_vcpu_id(struct domain *d, uint32_t dest, uint8_t dest_mode);
 
 enum hvm_intblk
 hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack);
-- 
2.51.0



--
Julian Vetter | Vates Hypervisor & Kernel Developer

XCP-ng & Xen Orchestra - Vates solutions

web: https://vates.tech




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.