
[Xen-devel] [PATCH v6 12/12] microcode: update microcode on cores in parallel



microcode_update_lock and microcode_mutex prevent cores from updating
microcode in parallel. The following changes are made to support
parallel microcode updates across cores.

microcode_update_lock is removed. This lock prevented logical threads
of the same core from updating microcode at the same time, but, being a
global lock, it also prevented parallel microcode updates on different
cores. Serialization of the sibling threads of a core is now ensured at
the call sites of apply_microcode() instead of inside it:
1. For late microcode update, do_microcode_update() ensures that only
one sibling thread of a core updates microcode (see the sketch after
this list).
2. For microcode update during system startup or CPU hotplug, the new
early_ucode_update_lock guarantees serialization of logical threads.
This sub-optimal method is used because it is hard to implement a
per-core lock at that early stage, where sibling info isn't initialized
yet.
3. get/put_cpu_bitmaps() prevents CPU hotplug from running concurrently
with a late microcode update.
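
A minimal sketch of the "one thread per core" rule on the late update
path, assuming Xen's per-CPU cpu_sibling_mask is already populated;
late_update_this_cpu() is a hypothetical name, and the real
do_microcode_update() in this series additionally handles the CPU
rendezvous:

    static int late_update_this_cpu(void)
    {
        unsigned int cpu = smp_processor_id();

        /* Only the first online sibling of each core loads the blob. */
        if ( cpu != cpumask_first(per_cpu(cpu_sibling_mask, cpu)) )
            return 0; /* sibling threads share the updated microcode */

        return microcode_ops->apply_microcode();
    }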

microcode_mutex is replaced by an rwlock. microcode_mutex was used to
prevent concurrent accesses to 'uci' (already removed in a previous
patch) and to microcode_cache. Now the only shared resource that needs
protection is microcode_cache. An rwlock allows multiple readers (one
thread of each core) to access the global cache and update microcode
simultaneously. Because the rwlock may be taken in stop_machine context,
where interrupts are disabled, the irq{save,restore} variants are used
to acquire and release it (the write-side pattern is sketched below).
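
As a condensed illustration of the write-side pattern that the
microcode_save_patch() hunk below adopts (cache_replace() is a
hypothetical helper, not part of the patch): the stale patch is only
remembered while the write lock is held, and is freed after
write_unlock_irqrestore(), i.e. once interrupts are enabled again:

    static void cache_replace(struct microcode_patch *old,
                              struct microcode_patch *new)
    {
        struct microcode_patch *stale;
        unsigned long flags;

        write_lock_irqsave(&cache_rwlock, flags);
        list_replace(&old->list, &new->list);
        stale = old;                /* remember it, don't free it yet */
        write_unlock_irqrestore(&cache_rwlock, flags);

        /* Deferred free: done only after interrupts are re-enabled. */
        microcode_ops->free_patch(stale);
    }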

Note that the printk calls in apply_microcode() and svm_host_osvw_init()
(for AMD only) are still executed sequentially.

Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
---
Changes in v6:
 - introduce early_ucode_update_lock to serialize early ucode update.

Changes in v5:
 - newly add
---
 xen/arch/x86/microcode.c       | 65 ++++++++++++++++++++++++++----------------
 xen/arch/x86/microcode_amd.c   |  8 +-----
 xen/arch/x86/microcode_intel.c |  9 +-----
 3 files changed, 43 insertions(+), 39 deletions(-)

diff --git a/xen/arch/x86/microcode.c b/xen/arch/x86/microcode.c
index 96bcef6..e7df70f 100644
--- a/xen/arch/x86/microcode.c
+++ b/xen/arch/x86/microcode.c
@@ -204,7 +204,9 @@ scan:
 
 const struct microcode_ops *microcode_ops;
 
-static DEFINE_SPINLOCK(microcode_mutex);
+static DEFINE_RWLOCK(cache_rwlock);
+/* Hold this lock during early microcode update  */
+static DEFINE_SPINLOCK(early_ucode_update_lock);
 
 DEFINE_PER_CPU(struct cpu_signature, cpu_sig);
 
@@ -224,40 +226,47 @@ static atomic_t cpu_in, cpu_out;
  */
 bool microcode_save_patch(struct microcode_patch *new)
 {
-    struct microcode_patch *old;
+    struct microcode_patch *old, *free = NULL;
+    enum microcode_match_result result = MIS_UCODE;
+    unsigned long flags;
 
-    ASSERT(spin_is_locked(&microcode_mutex));
+    write_lock_irqsave(&cache_rwlock, flags);
 
     list_for_each_entry(old, &microcode_cache, list)
     {
-        enum microcode_match_result result =
-            microcode_ops->compare_patch(new, old);
+        result = microcode_ops->compare_patch(new, old);
 
         if ( result == OLD_UCODE )
         {
-            microcode_ops->free_patch(new);
-            return false;
+            free = new;
+            break;
         }
         else if ( result == NEW_UCODE )
         {
             list_replace(&old->list, &new->list);
-            microcode_ops->free_patch(old);
-            return true;
+            free = old;
+            break;
         }
         else /* result == MIS_UCODE */
             continue;
     }
 
-    if ( microcode_ops->is_patch_compatible &&
-         !microcode_ops->is_patch_compatible(new) )
+    if ( result == MIS_UCODE )
     {
-        xfree(new);
-        return false;
+        if ( microcode_ops->is_patch_compatible &&
+             !microcode_ops->is_patch_compatible(new) )
+            free = new;
+        else
+            list_add_tail(&new->list, &microcode_cache);
     }
 
-    list_add_tail(&new->list, &microcode_cache);
+    write_unlock_irqrestore(&cache_rwlock, flags);
+
+    /* Free the replaced or rejected patch once interrupts are enabled again */
+    if ( free )
+        microcode_ops->free_patch(free);
 
-    return true;
+    return free != new;
 }
 
 /*
@@ -269,8 +278,6 @@ const struct microcode_patch *microcode_find_patch(void)
 {
     const struct microcode_patch *microcode_patch;
 
-    ASSERT(spin_is_locked(&microcode_mutex));
-
     list_for_each_entry(microcode_patch, &microcode_cache, list)
     {
         if ( microcode_ops->match_cpu(microcode_patch) )
@@ -288,11 +295,9 @@ static int microcode_parse_blob(char *buf, uint32_t len)
 {
     int ret;
 
-    spin_lock(&microcode_mutex);
     ret = microcode_ops->collect_cpu_info(&this_cpu(cpu_sig));
     if ( likely(!ret) )
         ret = microcode_ops->cpu_request_microcode(buf, len);
-    spin_unlock(&microcode_mutex);
 
     return ret;
 }
@@ -300,12 +305,15 @@ static int microcode_parse_blob(char *buf, uint32_t len)
 static int microcode_update_cpu(void)
 {
     int ret;
+    unsigned long flags;
 
-    spin_lock(&microcode_mutex);
     ret = microcode_ops->collect_cpu_info(&this_cpu(cpu_sig));
-    if ( likely(!ret) )
-        ret = microcode_ops->apply_microcode();
-    spin_unlock(&microcode_mutex);
+    if ( unlikely(ret) )
+        return ret;
+
+    read_lock_irqsave(&cache_rwlock, flags);
+    ret = microcode_ops->apply_microcode();
+    read_unlock_irqrestore(&cache_rwlock, flags);
 
     return ret;
 }
@@ -493,7 +501,16 @@ __initcall(microcode_init);
 
 int early_microcode_update_cpu(void)
 {
-    return microcode_ops ? microcode_update_cpu() : 0;
+    int rc;
+
+    if ( !microcode_ops )
+        return 0;
+
+    spin_lock(&early_ucode_update_lock);
+    rc = microcode_update_cpu();
+    spin_unlock(&early_ucode_update_lock);
+
+    return rc;
 }
 
 /*
diff --git a/xen/arch/x86/microcode_amd.c b/xen/arch/x86/microcode_amd.c
index 5c25ff2..4aa8fdc 100644
--- a/xen/arch/x86/microcode_amd.c
+++ b/xen/arch/x86/microcode_amd.c
@@ -74,9 +74,6 @@ struct mpbhdr {
     uint8_t data[];
 };
 
-/* serialize access to the physical write */
-static DEFINE_SPINLOCK(microcode_update_lock);
-
 /* See comment in start_update() for cases when this routine fails */
 static int collect_cpu_info(struct cpu_signature *csig)
 {
@@ -251,7 +248,6 @@ static enum microcode_match_result compare_patch(
 
 static int apply_microcode(void)
 {
-    unsigned long flags;
     uint32_t rev;
     const struct microcode_header_amd *hdr;
     const struct microcode_patch *patch;
@@ -265,15 +261,13 @@ static int apply_microcode(void)
 
     hdr = patch->data;
 
-    spin_lock_irqsave(&microcode_update_lock, flags);
+    BUG_ON(local_irq_is_enabled());
 
     hw_err = wrmsr_safe(MSR_AMD_PATCHLOADER, (unsigned long)hdr);
 
     /* get patch id after patching */
     rdmsrl(MSR_AMD_PATCHLEVEL, rev);
 
-    spin_unlock_irqrestore(&microcode_update_lock, flags);
-
     /*
      * Some processors leave the ucode blob mapping as UC after the update.
      * Flush the mapping to regain normal cacheability.
diff --git a/xen/arch/x86/microcode_intel.c b/xen/arch/x86/microcode_intel.c
index d5ef145..56f3956 100644
--- a/xen/arch/x86/microcode_intel.c
+++ b/xen/arch/x86/microcode_intel.c
@@ -95,9 +95,6 @@ struct extended_sigtable {
 
 #define STEPPING_MASK ~0xfU
 
-/* serialize access to the physical write to MSR 0x79 */
-static DEFINE_SPINLOCK(microcode_update_lock);
-
 static int collect_cpu_info(struct cpu_signature *csig)
 {
     unsigned int cpu_num = smp_processor_id();
@@ -334,7 +331,6 @@ static int get_matching_microcode(const void *mc, unsigned int cpu)
 
 static int apply_microcode(void)
 {
-    unsigned long flags;
     uint64_t msr_content;
     unsigned int val[2];
     unsigned int cpu_num = raw_smp_processor_id();
@@ -347,9 +343,7 @@ static int apply_microcode(void)
         return -EINVAL;
 
     mc_intel = patch->data;
-
-    /* serialize access to the physical write to MSR 0x79 */
-    spin_lock_irqsave(&microcode_update_lock, flags);
+    BUG_ON(local_irq_is_enabled());
 
     /*
      * Writeback and invalidate caches before updating microcode to avoid
@@ -368,7 +362,6 @@ static int apply_microcode(void)
     rdmsrl(MSR_IA32_UCODE_REV, msr_content);
     val[1] = (uint32_t)(msr_content >> 32);
 
-    spin_unlock_irqrestore(&microcode_update_lock, flags);
     if ( val[1] != mc_intel->hdr.rev )
     {
         printk(KERN_ERR "microcode: CPU%d update from revision "
-- 
1.8.3.1

