[Xen-changelog] [xen-unstable] [HVM] Clarify ioreq interactions between Xen and qemu-dm.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [HVM] Clarify ioreq interactions between Xen and qemu-dm.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 10 Nov 2006 12:20:12 +0000
Delivery-date: Fri, 10 Nov 2006 04:20:03 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 814fbacfafc63d6e187560142a33d2d0d031b09d
# Parent  357127ca8bf25f1f75bde8312976a9a34eb00b8d
[HVM] Clarify ioreq interactions between Xen and qemu-dm.

Mainly this involves placing appropriate barriers before
updating the shared state variable and after reading from it. The
model is that state switches drive the transfer of data to and fro
in the shared ioreq structure: writers should wmb() before updating
the state variable, and readers should rmb() after seeing a state
change.
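
As a minimal illustration of that pairing (a sketch, not part of this
changeset: fill_ioreq() and the two function names are hypothetical,
while ioreq_t, the STATE_* values and the barrier macros are those used
in the hunks below):

static void writer_publish(ioreq_t *req)
{
    fill_ioreq(req);                  /* 1. fill in the request payload   */
    wmb();                            /* 2. payload visible before state  */
    req->state = STATE_IOREQ_READY;   /* 3. state switch publishes data   */
}

static ioreq_t *reader_fetch(ioreq_t *req)
{
    if ( req->state != STATE_IOREQ_READY )
        return NULL;                  /* nothing published yet            */
    rmb();                            /* see READY /then/ read contents   */
    req->state = STATE_IOREQ_INPROCESS;
    return req;
}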

These barriers aren't actually required given the current code
structure since the communication flow is really driven by
event-channel notifications, which happen to provide a sufficient
barrier. However, relying on this for all time and on all
architectures seems foolish, and the barriers have negligible cost
compared with the total ioreq round-trip latency. Also, the model of
communications being driven by the shared-memory state variable more
closely matches other inter-domain protocols (where there is usually a
shared-memory producer index), and is hence a model we are familiar
with and likely to implement correctly.
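
For comparison, the producer-index pattern alluded to above usually looks
something like the following sketch (purely illustrative; the ring layout,
RING_SIZE and notify_peer() are made-up names rather than anything in this
changeset, and wmb() is assumed from the surrounding headers):

#define RING_SIZE 16

struct simple_ring {
    unsigned int req_prod;                /* written by the producer only   */
    ioreq_t      ring[RING_SIZE];
};

static void ring_push(struct simple_ring *r, const ioreq_t *req)
{
    r->ring[r->req_prod % RING_SIZE] = *req;  /* fill the slot first        */
    wmb();                                    /* slot contents, /then/ ...  */
    r->req_prod++;                            /* ... index update publishes */
    notify_peer();                            /* event-channel kick         */
}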

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 tools/ioemu/target-i386-dm/helper2.c |   42 ++++++++++++++++++++---------------
 xen/arch/x86/hvm/io.c                |    2 +
 xen/arch/x86/hvm/platform.c          |   30 ++++++++++++++++---------
 3 files changed, 47 insertions(+), 27 deletions(-)

diff -r 357127ca8bf2 -r 814fbacfafc6 tools/ioemu/target-i386-dm/helper2.c
--- a/tools/ioemu/target-i386-dm/helper2.c      Fri Nov 10 09:50:35 2006 +0000
+++ b/tools/ioemu/target-i386-dm/helper2.c      Fri Nov 10 10:30:38 2006 +0000
@@ -209,18 +209,19 @@ static ioreq_t *__cpu_get_ioreq(int vcpu
 
     req = &(shared_page->vcpu_iodata[vcpu].vp_ioreq);
 
-    if (req->state == STATE_IOREQ_READY) {
-        req->state = STATE_IOREQ_INPROCESS;
-        rmb();
-        return req;
-    }
-
-    fprintf(logfile, "False I/O request ... in-service already: "
-            "%x, ptr: %x, port: %"PRIx64", "
-            "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
-            req->state, req->data_is_ptr, req->addr,
-            req->data, req->count, req->size);
-    return NULL;
+    if (req->state != STATE_IOREQ_READY) {
+        fprintf(logfile, "I/O request not ready: "
+                "%x, ptr: %x, port: %"PRIx64", "
+                "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
+                req->state, req->data_is_ptr, req->addr,
+                req->data, req->count, req->size);
+        return NULL;
+    }
+
+    rmb(); /* see IOREQ_READY /then/ read contents of ioreq */
+
+    req->state = STATE_IOREQ_INPROCESS;
+    return req;
 }
 
 //use poll to get the port notification
@@ -504,12 +505,19 @@ void cpu_handle_ioreq(void *opaque)
     if (req) {
         __handle_ioreq(env, req);
 
-        /* No state change if state = STATE_IORESP_HOOK */
-        if (req->state == STATE_IOREQ_INPROCESS) {
-            req->state = STATE_IORESP_READY;
-            xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
-        } else
+        if (req->state != STATE_IOREQ_INPROCESS) {
+            fprintf(logfile, "Badness in I/O request ... not in service?!: "
+                    "%x, ptr: %x, port: %"PRIx64", "
+                    "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
+                    req->state, req->data_is_ptr, req->addr,
+                    req->data, req->count, req->size);
             destroy_hvm_domain();
+            return;
+        }
+
+        wmb(); /* Update ioreq contents /then/ update state. */
+        req->state = STATE_IORESP_READY;
+        xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
     }
 }
 
diff -r 357127ca8bf2 -r 814fbacfafc6 xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Fri Nov 10 09:50:35 2006 +0000
+++ b/xen/arch/x86/hvm/io.c     Fri Nov 10 10:30:38 2006 +0000
@@ -745,6 +745,8 @@ void hvm_io_assist(struct vcpu *v)
         domain_crash_synchronous();
     }
 
+    rmb(); /* see IORESP_READY /then/ read contents of ioreq */
+
     p->state = STATE_IOREQ_NONE;
 
     if ( p->type == IOREQ_TYPE_PIO )
diff -r 357127ca8bf2 -r 814fbacfafc6 xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c       Fri Nov 10 09:50:35 2006 +0000
+++ b/xen/arch/x86/hvm/platform.c       Fri Nov 10 10:30:38 2006 +0000
@@ -727,15 +727,20 @@ static void hvm_send_assist_req(struct v
     ioreq_t *p;
 
     p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
-    if ( unlikely(p->state != STATE_IOREQ_NONE) ) {
-        /* This indicates a bug in the device model.  Crash the
-           domain. */
-        printk("Device model set bad IO state %d.\n", p->state);
+    if ( unlikely(p->state != STATE_IOREQ_NONE) )
+    {
+        /* This indicates a bug in the device model.  Crash the domain. */
+        gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
         domain_crash(v->domain);
         return;
     }
 
     prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
+
+    /*
+     * Following happens /after/ blocking and setting up ioreq contents.
+     * prepare_wait_on_xen_event_channel() is an implicit barrier.
+     */
     p->state = STATE_IOREQ_READY;
     notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
 }
@@ -781,10 +786,12 @@ void send_pio_req(unsigned long port, un
             p->data = shadow_gva_to_gpa(current, value);
         else
             p->data = value; /* guest VA == guest PA */
-    } else if ( dir == IOREQ_WRITE )
+    }
+    else if ( dir == IOREQ_WRITE )
         p->data = value;
 
-    if ( hvm_portio_intercept(p) ) {
+    if ( hvm_portio_intercept(p) )
+    {
         p->state = STATE_IORESP_READY;
         hvm_io_assist(v);
         return;
@@ -829,15 +836,18 @@ static void send_mmio_req(unsigned char 
 
     p->io_count++;
 
-    if (value_is_ptr) {
-        if (hvm_paging_enabled(v))
+    if ( value_is_ptr )
+    {
+        if ( hvm_paging_enabled(v) )
             p->data = shadow_gva_to_gpa(v, value);
         else
             p->data = value; /* guest VA == guest PA */
-    } else
+    }
+    else
         p->data = value;
 
-    if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) ) {
+    if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) )
+    {
         p->state = STATE_IORESP_READY;
         hvm_io_assist(v);
         return;

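For orientation, the end-to-end state flow that results from the hunks above
(a summary only, not part of the changeset; the function names are the ones
touched by the patch):

  Xen,  hvm_send_assist_req(): fill ioreq, implicit barrier,
                               state = STATE_IOREQ_READY, notify qemu-dm
  qemu, __cpu_get_ioreq():     see STATE_IOREQ_READY, rmb(),
                               state = STATE_IOREQ_INPROCESS, handle request
  qemu, cpu_handle_ioreq():    fill response, wmb(),
                               state = STATE_IORESP_READY, notify Xen
  Xen,  hvm_io_assist():       see STATE_IORESP_READY, rmb(),
                               state = STATE_IOREQ_NONE, consume response
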
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
