[Xen-devel] [RFC PATCH V2 20/26] passthrough: move some fields of hvm_gmsi_info to a sub-structure

From: Chao Gao <chao.gao@xxxxxxxxx>

No functional change. This patch prepares for introducing new fields into
hvm_gmsi_info to manage a remapping-format MSI bound to a physical MSI.

Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
Signed-off-by: Lan Tianyu <tianyu.lan@xxxxxxxxx>
---
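A minimal sketch of where this is heading (the "intremap" member and its
fields below are illustrative assumptions, not part of this patch): hiding
the existing fields behind a named "legacy" sub-structure inside an
anonymous union lets a follow-up patch add an alternative layout for a
remapping-format MSI without growing the structure:

    struct hvm_gmsi_info {
        union {
            struct {                /* non-remapped (legacy) format MSI */
                uint32_t gvec;
                uint32_t gflags;
            } legacy;
            struct {                /* illustrative only: possible fields
                                     * for a remapping-format MSI */
                uint32_t source_id;
                uint32_t data;
                uint64_t addr;
            } intremap;
        };
        int dest_vcpu_id; /* -1 :multi-dest, non-negative: dest_vcpu_id */
        bool posted; /* directly deliver to guest via VT-d PI? */
    };

Callers then pick the view matching the MSI's format, e.g.
pirq_dpci->gmsi.legacy.gvec for a non-remapped MSI, as in the hunks below.
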
 xen/arch/x86/hvm/vmsi.c      |  4 ++--
 xen/drivers/passthrough/io.c | 32 ++++++++++++++++----------------
 xen/include/xen/hvm/irq.h    |  8 ++++++--
 3 files changed, 24 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index a36692c..c4ec0ad 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -101,8 +101,8 @@ int vmsi_deliver(
 
 void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
 {
-    uint32_t flags = pirq_dpci->gmsi.gflags;
-    int vector = pirq_dpci->gmsi.gvec;
+    uint32_t flags = pirq_dpci->gmsi.legacy.gflags;
+    int vector = pirq_dpci->gmsi.legacy.gvec;
     uint8_t dest = (uint8_t)flags;
     uint8_t dest_mode = !!(flags & VMSI_DM_MASK);
     uint8_t delivery_mode = (flags & VMSI_DELIV_MASK)
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index e5a43e5..2158a11 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -325,8 +325,8 @@ int pt_irq_create_bind(
         {
             pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | HVM_IRQ_DPCI_MACH_MSI |
                                HVM_IRQ_DPCI_GUEST_MSI;
-            pirq_dpci->gmsi.gvec = pt_irq_bind->u.msi.gvec;
-            pirq_dpci->gmsi.gflags = pt_irq_bind->u.msi.gflags;
+            pirq_dpci->gmsi.legacy.gvec = pt_irq_bind->u.msi.gvec;
+            pirq_dpci->gmsi.legacy.gflags = pt_irq_bind->u.msi.gflags;
             /*
              * 'pt_irq_create_bind' can be called after 'pt_irq_destroy_bind'.
              * The 'pirq_cleanup_check' which would free the structure is only
@@ -358,8 +358,8 @@ int pt_irq_create_bind(
             }
             if ( unlikely(rc) )
             {
-                pirq_dpci->gmsi.gflags = 0;
-                pirq_dpci->gmsi.gvec = 0;
+                pirq_dpci->gmsi.legacy.gflags = 0;
+                pirq_dpci->gmsi.legacy.gvec = 0;
                 pirq_dpci->dom = NULL;
                 pirq_dpci->flags = 0;
                 pirq_cleanup_check(info, d);
@@ -378,20 +378,20 @@ int pt_irq_create_bind(
             }
 
             /* If pirq is already mapped as vmsi, update guest data/addr. */
-            if ( pirq_dpci->gmsi.gvec != pt_irq_bind->u.msi.gvec ||
-                 pirq_dpci->gmsi.gflags != pt_irq_bind->u.msi.gflags )
+            if ( pirq_dpci->gmsi.legacy.gvec != pt_irq_bind->u.msi.gvec ||
+                 pirq_dpci->gmsi.legacy.gflags != pt_irq_bind->u.msi.gflags )
             {
                 /* Directly clear pending EOIs before enabling new MSI info. */
                 pirq_guest_eoi(info);
 
-                pirq_dpci->gmsi.gvec = pt_irq_bind->u.msi.gvec;
-                pirq_dpci->gmsi.gflags = pt_irq_bind->u.msi.gflags;
+                pirq_dpci->gmsi.legacy.gvec = pt_irq_bind->u.msi.gvec;
+                pirq_dpci->gmsi.legacy.gflags = pt_irq_bind->u.msi.gflags;
             }
         }
         /* Calculate dest_vcpu_id for MSI-type pirq migration. */
-        dest = pirq_dpci->gmsi.gflags & VMSI_DEST_ID_MASK;
-        dest_mode = !!(pirq_dpci->gmsi.gflags & VMSI_DM_MASK);
-        delivery_mode = (pirq_dpci->gmsi.gflags & VMSI_DELIV_MASK) >>
+        dest = pirq_dpci->gmsi.legacy.gflags & VMSI_DEST_ID_MASK;
+        dest_mode = !!(pirq_dpci->gmsi.legacy.gflags & VMSI_DM_MASK);
+        delivery_mode = (pirq_dpci->gmsi.legacy.gflags & VMSI_DELIV_MASK) >>
                          GFLAGS_SHIFT_DELIV_MODE;
 
         dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
@@ -404,7 +404,7 @@ int pt_irq_create_bind(
         {
             if ( delivery_mode == dest_LowestPrio )
                 vcpu = vector_hashing_dest(d, dest, dest_mode,
-                                           pirq_dpci->gmsi.gvec);
+                                           pirq_dpci->gmsi.legacy.gvec);
             if ( vcpu )
                 pirq_dpci->gmsi.posted = true;
         }
@@ -414,7 +414,7 @@ int pt_irq_create_bind(
         /* Use interrupt posting if it is supported. */
         if ( iommu_intpost )
             pi_update_irte(vcpu ? &vcpu->arch.hvm_vmx.pi_desc : NULL,
-                           info, pirq_dpci->gmsi.gvec);
+                           info, pirq_dpci->gmsi.legacy.gvec);
 
         break;
     }
@@ -729,10 +729,10 @@ static int _hvm_dpci_msi_eoi(struct domain *d,
     int vector = (long)arg;
 
     if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) &&
-         (pirq_dpci->gmsi.gvec == vector) )
+         (pirq_dpci->gmsi.legacy.gvec == vector) )
     {
-        int dest = pirq_dpci->gmsi.gflags & VMSI_DEST_ID_MASK;
-        int dest_mode = !!(pirq_dpci->gmsi.gflags & VMSI_DM_MASK);
+        int dest = pirq_dpci->gmsi.legacy.gflags & VMSI_DEST_ID_MASK;
+        int dest_mode = !!(pirq_dpci->gmsi.legacy.gflags & VMSI_DM_MASK);
 
         if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest,
                                dest_mode) )
diff --git a/xen/include/xen/hvm/irq.h b/xen/include/xen/hvm/irq.h
index 671a6f2..5f8e2f4 100644
--- a/xen/include/xen/hvm/irq.h
+++ b/xen/include/xen/hvm/irq.h
@@ -60,8 +60,12 @@ struct dev_intx_gsi_link {
 #define GFLAGS_SHIFT_TRG_MODE       15
 
 struct hvm_gmsi_info {
-    uint32_t gvec;
-    uint32_t gflags;
+    union {
+        struct {
+            uint32_t gvec;
+            uint32_t gflags;
+        } legacy;
+    };
     int dest_vcpu_id; /* -1 :multi-dest, non-negative: dest_vcpu_id */
     bool posted; /* directly deliver to guest via VT-d PI? */
 };
-- 
1.8.3.1

