
[PATCH v3 8/8] xen/evtchn: use READ/WRITE_ONCE() for accessing ring indices



To avoid read- and write-tearing by the compiler, use READ_ONCE()
and WRITE_ONCE() when accessing the ring indices in evtchn.c.
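
As a side note for review, here is a minimal user-space sketch of the
access pattern the patch adopts in evtchn_interrupt(). It is an
illustration only, not part of the patch: the READ_ONCE()/WRITE_ONCE()
stand-ins are simplified versions of the kernel macros, and the ring,
indices and ring_put() helper are hypothetical, not the evtchn
structures themselves.

/* Sketch: read each shared index once, publish the new producer index once. */
#include <stdio.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() macros. */
#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

#define RING_SIZE 8

static unsigned int ring[RING_SIZE];
static unsigned int ring_prod, ring_cons;   /* shared with another context */

/* Producer side: load both indices exactly once into locals. */
static int ring_put(unsigned int val)
{
        unsigned int prod = READ_ONCE(ring_prod);
        unsigned int cons = READ_ONCE(ring_cons);

        if (prod - cons >= RING_SIZE)
                return -1;                      /* ring full */

        ring[prod % RING_SIZE] = val;
        /* smp_wmb() would go here in kernel code to order the store above. */
        WRITE_ONCE(ring_prod, prod + 1);        /* publish the new index once */
        return cons == prod;                    /* 1: ring was empty, wake reader */
}

int main(void)
{
        printf("put -> %d\n", ring_put(42));
        return 0;
}

Reading ring_prod/ring_cons into locals first also avoids the V3 issue
below: the producer index is incremented exactly once, and the
"ring was empty" test compares the values that were actually used.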

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V2:
- modify all accesses (Julien Grall)
V3:
- fix incrementing producer index (Ross Lagerwall)
---
 drivers/xen/evtchn.c | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 421382c73d88..c99415a70051 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -162,6 +162,7 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
 {
        struct user_evtchn *evtchn = data;
        struct per_user_data *u = evtchn->user;
+       unsigned int prod, cons;
 
        WARN(!evtchn->enabled,
             "Interrupt for port %u, but apparently not enabled; per-user %p\n",
@@ -171,10 +172,14 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
 
        spin_lock(&u->ring_prod_lock);
 
-       if ((u->ring_prod - u->ring_cons) < u->ring_size) {
-               *evtchn_ring_entry(u, u->ring_prod) = evtchn->port;
+       prod = READ_ONCE(u->ring_prod);
+       cons = READ_ONCE(u->ring_cons);
+
+       if ((prod - cons) < u->ring_size) {
+               *evtchn_ring_entry(u, prod) = evtchn->port;
                smp_wmb(); /* Ensure ring contents visible */
-               if (u->ring_cons == u->ring_prod++) {
+               WRITE_ONCE(u->ring_prod, prod + 1);
+               if (cons == prod) {
                        wake_up_interruptible(&u->evtchn_wait);
                        kill_fasync(&u->evtchn_async_queue,
                                    SIGIO, POLL_IN);
@@ -210,8 +215,8 @@ static ssize_t evtchn_read(struct file *file, char __user *buf,
                if (u->ring_overflow)
                        goto unlock_out;
 
-               c = u->ring_cons;
-               p = u->ring_prod;
+               c = READ_ONCE(u->ring_cons);
+               p = READ_ONCE(u->ring_prod);
                if (c != p)
                        break;
 
@@ -221,7 +226,7 @@ static ssize_t evtchn_read(struct file *file, char __user *buf,
                        return -EAGAIN;
 
                rc = wait_event_interruptible(u->evtchn_wait,
-                                             u->ring_cons != u->ring_prod);
+                       READ_ONCE(u->ring_cons) != READ_ONCE(u->ring_prod));
                if (rc)
                        return rc;
        }
@@ -251,7 +256,7 @@ static ssize_t evtchn_read(struct file *file, char __user *buf,
             copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
                goto unlock_out;
 
-       u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
+       WRITE_ONCE(u->ring_cons, c + (bytes1 + bytes2) / sizeof(evtchn_port_t));
        rc = bytes1 + bytes2;
 
  unlock_out:
@@ -552,7 +557,9 @@ static long evtchn_ioctl(struct file *file,
                /* Initialise the ring to empty. Clear errors. */
                mutex_lock(&u->ring_cons_mutex);
                spin_lock_irq(&u->ring_prod_lock);
-               u->ring_cons = u->ring_prod = u->ring_overflow = 0;
+               WRITE_ONCE(u->ring_cons, 0);
+               WRITE_ONCE(u->ring_prod, 0);
+               u->ring_overflow = 0;
                spin_unlock_irq(&u->ring_prod_lock);
                mutex_unlock(&u->ring_cons_mutex);
                rc = 0;
@@ -595,7 +602,7 @@ static __poll_t evtchn_poll(struct file *file, poll_table *wait)
        struct per_user_data *u = file->private_data;
 
        poll_wait(file, &u->evtchn_wait, wait);
-       if (u->ring_cons != u->ring_prod)
+       if (READ_ONCE(u->ring_cons) != READ_ONCE(u->ring_prod))
                mask |= EPOLLIN | EPOLLRDNORM;
        if (u->ring_overflow)
                mask = EPOLLERR;
-- 
2.26.2