[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 2/2] guest EOI: drop the struct domain * parameter from other functions



As a follow-up we can now also remove the struct domain * parameter
from the guest EOI functions.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

--- a/xen/arch/ia64/xen/hypercall.c
+++ b/xen/arch/ia64/xen/hypercall.c
@@ -70,7 +70,7 @@ static long __do_pirq_guest_eoi(struct d
                evtchn_unmask(pirq_to_evtchn(d, pirq));
                spin_unlock(&d->event_lock);
        }
-       return pirq_guest_eoi(d, pirq);
+       return pirq_guest_eoi(pirq);
 }
 
 long do_pirq_guest_eoi(int pirq)
--- a/xen/arch/ia64/xen/irq.c
+++ b/xen/arch/ia64/xen/irq.c
@@ -425,7 +425,7 @@ static int pirq_acktype(int irq)
     return ACKTYPE_NONE;
 }
 
-int pirq_guest_eoi(struct domain *d, struct pirq *pirq)
+int pirq_guest_eoi(struct pirq *pirq)
 {
     irq_desc_t *desc;
     irq_guest_action_t *action;
@@ -466,7 +466,7 @@ int pirq_guest_unmask(struct domain *d)
             pirq = pirqs[i]->pirq;
             if ( pirqs[i]->masked &&
                  !test_bit(pirqs[i]->evtchn, &s->evtchn_mask[0]) )
-            pirq_guest_eoi(d, pirqs[i]);
+            pirq_guest_eoi(pirqs[i]);
         }
     } while ( ++pirq < d->nr_pirqs && n == ARRAY_SIZE(pirqs) );
 
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1142,17 +1142,17 @@ static void set_eoi_ready(void *data)
     flush_ready_eoi();
 }
 
-void pirq_guest_eoi(struct domain *d, struct pirq *pirq)
+void pirq_guest_eoi(struct pirq *pirq)
 {
     struct irq_desc *desc;
 
     ASSERT(local_irq_is_enabled());
     desc = pirq_spin_lock_irq_desc(pirq, NULL);
     if ( desc )
-        desc_guest_eoi(d, desc, pirq);
+        desc_guest_eoi(desc, pirq);
 }
 
-void desc_guest_eoi(struct domain *d, struct irq_desc *desc, struct pirq *pirq)
+void desc_guest_eoi(struct irq_desc *desc, struct pirq *pirq)
 {
     irq_guest_action_t *action;
     cpumask_t           cpu_eoi_map;
@@ -1222,7 +1222,7 @@ int pirq_guest_unmask(struct domain *d)
             pirq = pirqs[i]->pirq;
             if ( pirqs[i]->masked &&
                  !test_bit(pirqs[i]->evtchn, &shared_info(d, evtchn_mask)) )
-                pirq_guest_eoi(d, pirqs[i]);
+                pirq_guest_eoi(pirqs[i]);
         }
     } while ( ++pirq < d->nr_pirqs && n == ARRAY_SIZE(pirqs) );
 
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -271,7 +271,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
             evtchn_unmask(pirq->evtchn);
         if ( !is_hvm_domain(v->domain) ||
              pirq->arch.hvm.emuirq == IRQ_PT )
-            pirq_guest_eoi(v->domain, pirq);
+            pirq_guest_eoi(pirq);
         spin_unlock(&v->domain->event_lock);
         ret = 0;
         break;
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -43,7 +43,7 @@ static int pt_irq_guest_eoi(struct domai
     {
         pirq_dpci->masked = 0;
         pirq_dpci->pending = 0;
-        pirq_guest_eoi(d, dpci_pirq(pirq_dpci));
+        pirq_guest_eoi(dpci_pirq(pirq_dpci));
     }
 
     return 0;
@@ -181,7 +181,7 @@ int pt_irq_create_bind_vtd(
             if ( pirq_dpci->gmsi.gvec != pt_irq_bind->u.msi.gvec ||
                  pirq_dpci->gmsi.gflags != pt_irq_bind->u.msi.gflags) {
                 /* Directly clear pending EOIs before enabling new MSI info. */
-                pirq_guest_eoi(d, info);
+                pirq_guest_eoi(info);
 
                 pirq_dpci->gmsi.gvec = pt_irq_bind->u.msi.gvec;
                 pirq_dpci->gmsi.gflags = pt_irq_bind->u.msi.gflags;
@@ -421,7 +421,7 @@ int hvm_do_IRQ_dpci(struct domain *d, st
 
 #ifdef SUPPORT_MSI_REMAPPING
 /* called with d->event_lock held */
-static void __msi_pirq_eoi(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
+static void __msi_pirq_eoi(struct hvm_pirq_dpci *pirq_dpci)
 {
     irq_desc_t *desc;
 
@@ -436,7 +436,7 @@ static void __msi_pirq_eoi(struct domain
             return;
 
          desc->status &= ~IRQ_INPROGRESS;
-         desc_guest_eoi(d, desc, pirq);
+         desc_guest_eoi(desc, pirq);
     }
 }
 
@@ -454,7 +454,7 @@ static int _hvm_dpci_msi_eoi(struct doma
         if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest,
                                dest_mode) )
         {
-            __msi_pirq_eoi(d, pirq_dpci);
+            __msi_pirq_eoi(pirq_dpci);
             return 1;
         }
     }
@@ -514,7 +514,7 @@ static int _hvm_dirq_assist(struct domai
             if ( pirq_dpci->flags & HVM_IRQ_DPCI_TRANSLATE )
             {
                /* for translated MSI to INTx interrupt, eoi as early as possible */
-                __msi_pirq_eoi(d, pirq_dpci);
+                __msi_pirq_eoi(pirq_dpci);
             }
 #endif
         }
@@ -569,7 +569,7 @@ static void __hvm_dpci_eoi(struct domain
         return;
 
     stop_timer(&pirq_dpci->timer);
-    pirq_guest_eoi(d, pirq);
+    pirq_guest_eoi(pirq);
 }
 
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -84,7 +84,7 @@ static int _hvm_dpci_isairq_eoi(struct d
             if ( --pirq_dpci->pending == 0 )
             {
                 stop_timer(&pirq_dpci->timer);
-                pirq_guest_eoi(d, dpci_pirq(pirq_dpci));
+                pirq_guest_eoi(dpci_pirq(pirq_dpci));
             }
         }
     }
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -162,8 +162,8 @@ void pirq_cleanup_check(struct pirq *, s
 #define pirq_cleanup_check(pirq, d) \
     ((pirq)->evtchn ? pirq_cleanup_check(pirq, d) : (void)0)
 
-extern void pirq_guest_eoi(struct domain *, struct pirq *);
-extern void desc_guest_eoi(struct domain *, struct irq_desc *, struct pirq *);
+extern void pirq_guest_eoi(struct pirq *);
+extern void desc_guest_eoi(struct irq_desc *, struct pirq *);
 extern int pirq_guest_unmask(struct domain *d);
 extern int pirq_guest_bind(struct vcpu *, struct pirq *, int will_share);
 extern void pirq_guest_unbind(struct domain *d, struct pirq *);


Attachment: guest_eoi-drop-domain.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.