
[Xen-devel] [PATCH SpectreV1+L1TF v4 08/11] xen/evtchn: block speculative out-of-bound accesses



Guests can interact with event channels using guest-specified data
such as port numbers, VIRQ numbers, and vCPU IDs. To avoid speculative
out-of-bounds accesses, bound these values with the nospec macros
before they are used as array indexes.
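
For illustration, here is a minimal, stand-alone sketch of the clamping
idea behind array_index_nospec(). The real Xen macro lives in
xen/include/xen/nospec.h and is architecture-specific; the branchless
mask below is only an assumed stand-in for demonstration, not the
actual implementation:

    #include <stdio.h>
    #include <stddef.h>

    /*
     * Illustrative stand-in for array_index_nospec(): clamp index to
     * [0, size) without a conditional branch, so that a mispredicted
     * bounds check cannot steer a speculative load past the end of
     * the array. Assumes a two's-complement target where index and
     * size fit in a signed long; the real macro handles this per
     * architecture.
     */
    static size_t index_nospec_sketch(size_t index, size_t size)
    {
        /* All-ones mask when index < size, all-zeroes otherwise. */
        size_t mask = (size_t)((long)(index - size) >>
                               (sizeof(long) * 8 - 1));

        return index & mask;
    }

    int main(void)
    {
        int array[4] = { 10, 20, 30, 40 };

        printf("%d\n", array[index_nospec_sketch(2, 4)]); /* in bounds: 30 */
        printf("%d\n", array[index_nospec_sketch(7, 4)]); /* clamped to 0: 10 */

        return 0;
    }

The architectural bounds checks in the hunks below are unchanged; the
clamp only ensures that a transiently executed load cannot read beyond
the array while the bounds check is still being resolved.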

This commit is part of the SpectreV1+L1TF mitigation patch series.

Signed-off-by: Norbert Manthey <nmanthey@xxxxxxxxx>

---
 xen/common/event_channel.c | 25 ++++++++++++++++++++-----
 xen/common/event_fifo.c    | 16 +++++++++++++---
 xen/include/xen/event.h    |  5 +++--
 3 files changed, 36 insertions(+), 10 deletions(-)

diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -368,8 +368,14 @@ int evtchn_bind_virq(evtchn_bind_virq_t *bind, evtchn_port_t port)
     if ( virq_is_global(virq) && (vcpu != 0) )
         return -EINVAL;
 
+    /*
+     * Make sure the guest-controlled value virq is bounded even during
+     * speculative execution.
+     */
+    virq = array_index_nospec(virq, ARRAY_SIZE(v->virq_to_evtchn));
+
     if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
-         ((v = d->vcpu[vcpu]) == NULL) )
+         ((v = d->vcpu[array_index_nospec(vcpu, d->max_vcpus)]) == NULL) )
         return -ENOENT;
 
     spin_lock(&d->event_lock);
@@ -419,7 +425,7 @@ static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
     long           rc = 0;
 
     if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
-         (d->vcpu[vcpu] == NULL) )
+         (d->vcpu[array_index_nospec(vcpu, d->max_vcpus)] == NULL) )
         return -ENOENT;
 
     spin_lock(&d->event_lock);
@@ -816,6 +822,12 @@ int set_global_virq_handler(struct domain *d, uint32_t virq)
     if (!virq_is_global(virq))
         return -EINVAL;
 
+    /*
+     * Make sure the guest-controlled value virq is bounded even during
+     * speculative execution.
+     */
+    virq = array_index_nospec(virq, ARRAY_SIZE(global_virq_handlers));
+
     if (global_virq_handlers[virq] == d)
         return 0;
 
@@ -931,7 +943,8 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
     struct evtchn *chn;
     long           rc = 0;
 
-    if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )
+    if ( (vcpu_id >= d->max_vcpus) ||
+         (d->vcpu[array_index_nospec(vcpu_id, d->max_vcpus)] == NULL) )
         return -ENOENT;
 
     spin_lock(&d->event_lock);
@@ -969,8 +982,10 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
         unlink_pirq_port(chn, d->vcpu[chn->notify_vcpu_id]);
         chn->notify_vcpu_id = vcpu_id;
         pirq_set_affinity(d, chn->u.pirq.irq,
-                          cpumask_of(d->vcpu[vcpu_id]->processor));
-        link_pirq_port(port, chn, d->vcpu[vcpu_id]);
+                          cpumask_of(d->vcpu[array_index_nospec(vcpu_id,
+                                                                d->max_vcpus)]->processor));
+        link_pirq_port(port, chn, d->vcpu[array_index_nospec(vcpu_id,
+                                                             d->max_vcpus)]);
         break;
     default:
         rc = -EINVAL;
diff --git a/xen/common/event_fifo.c b/xen/common/event_fifo.c
--- a/xen/common/event_fifo.c
+++ b/xen/common/event_fifo.c
@@ -33,7 +33,8 @@ static inline event_word_t *evtchn_fifo_word_from_port(const struct domain *d,
      */
     smp_rmb();
 
-    p = port / EVTCHN_FIFO_EVENT_WORDS_PER_PAGE;
+    p = array_index_nospec(port / EVTCHN_FIFO_EVENT_WORDS_PER_PAGE,
+                           d->evtchn_fifo->num_evtchns);
     w = port % EVTCHN_FIFO_EVENT_WORDS_PER_PAGE;
 
     return d->evtchn_fifo->event_array[p] + w;
@@ -516,14 +517,23 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
     gfn     = init_control->control_gfn;
     offset  = init_control->offset;
 
-    if ( vcpu_id >= d->max_vcpus || !d->vcpu[vcpu_id] )
+    if ( vcpu_id >= d->max_vcpus ||
+         !d->vcpu[array_index_nospec(vcpu_id, d->max_vcpus)] )
         return -ENOENT;
-    v = d->vcpu[vcpu_id];
+
+    v = d->vcpu[array_index_nospec(vcpu_id, d->max_vcpus)];
 
     /* Must not cross page boundary. */
     if ( offset > (PAGE_SIZE - sizeof(evtchn_fifo_control_block_t)) )
         return -EINVAL;
 
+    /*
+     * Make sure the guest-controlled value offset is bounded even during
+     * speculative execution.
+     */
+    offset = array_index_nospec(offset,
+                              PAGE_SIZE - sizeof(evtchn_fifo_control_block_t));
+
     /* Must be 8-bytes aligned. */
     if ( offset & (8 - 1) )
         return -EINVAL;
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -13,6 +13,7 @@
 #include <xen/smp.h>
 #include <xen/softirq.h>
 #include <xen/bitops.h>
+#include <xen/nospec.h>
 #include <asm/event.h>
 
 /*
@@ -96,7 +97,7 @@ void arch_evtchn_inject(struct vcpu *v);
  * The first bucket is directly accessed via d->evtchn.
  */
 #define group_from_port(d, p) \
-    ((d)->evtchn_group[(p) / EVTCHNS_PER_GROUP])
+    array_access_nospec((d)->evtchn_group, (p) / EVTCHNS_PER_GROUP)
 #define bucket_from_port(d, p) \
     ((group_from_port(d, p))[((p) % EVTCHNS_PER_GROUP) / EVTCHNS_PER_BUCKET])
 
@@ -110,7 +111,7 @@ static inline bool_t port_is_valid(struct domain *d, unsigned int p)
 static inline struct evtchn *evtchn_from_port(struct domain *d, unsigned int p)
 {
     if ( p < EVTCHNS_PER_BUCKET )
-        return &d->evtchn[p];
+        return &d->evtchn[array_index_nospec(p, EVTCHNS_PER_BUCKET)];
     return bucket_from_port(d, p) + (p % EVTCHNS_PER_BUCKET);
 }
 
-- 
2.7.4






