
Re: [Xen-devel] [PATCH] x86/HVM: replace open-coded non-local cache flushing



Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>

 

From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
Sent: Friday, April 25, 2014 6:56 PM
To: Jan Beulich
Cc: xen-devel; Tian, Kevin; Keir Fraser; suravee.suthikulpanit@xxxxxxx; Dong, Eddie; Nakajima, Jun; Boris Ostrovsky
Subject: Re: [Xen-devel] [PATCH] x86/HVM: replace open-coded non-local cache flushing

 

On 25/04/14 11:51, Jan Beulich wrote:

We have accumulated quite a number of these open-coded cache flushes,
despite having a pre-canned interface for them.
 
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>


Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
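
For reference, the pre-canned interface in question is the one declared in
xen/include/asm-x86/flushtlb.h. A minimal sketch of its shape (reconstructed,
shape only and not a verbatim copy; the comments are illustrative and the
exact FLUSH_* flag values are omitted):

    /* Sketch of xen/include/asm-x86/flushtlb.h -- not a verbatim copy. */

    /* Write back/invalidate caches and/or flush TLBs on the CPUs in
     * @mask; @va selects a per-page flush, NULL means no VA component. */
    void flush_area_mask(const cpumask_t *mask, const void *va,
                         unsigned int flags);

    /* The same, with no per-VA component. */
    #define flush_mask(mask, flags) flush_area_mask(mask, NULL, flags)

    /* The same, targeting every online CPU (the local one included). */
    #define flush_all(flags)        flush_mask(&cpu_online_map, flags)

With that, flush_all(FLUSH_CACHE) performs the WBINVD locally and IPIs the
remaining online CPUs to do the same, which is what each of the removed
local_flush_cache()/wbinvd_ipi() helpers below open-coded via on_each_cpu().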

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1823,11 +1823,6 @@ static bool_t domain_exit_uc_mode(struct
     return 1;
 }
 
-static void local_flush_cache(void *info)
-{
-    wbinvd();
-}
-
 static void hvm_set_uc_mode(struct vcpu *v, bool_t is_in_uc_mode)
 {
     v->domain->arch.hvm_domain.is_in_uc_mode = is_in_uc_mode;
@@ -1927,7 +1922,7 @@ void hvm_shadow_handle_cd(struct vcpu *v
             domain_pause_nosync(v->domain);
 
             /* Flush physical caches. */
-            on_each_cpu(local_flush_cache, NULL, 1);
+            flush_all(FLUSH_CACHE);
             hvm_set_uc_mode(v, 1);
 
             domain_unpause(v->domain);
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2106,15 +2106,10 @@ static void svm_vmexit_mce_intercept(
     }
 }
 
-static void wbinvd_ipi(void *info)
-{
-    wbinvd();
-}
-
 static void svm_wbinvd_intercept(void)
 {
     if ( cache_flush_permitted(current->domain) )
-        on_each_cpu(wbinvd_ipi, NULL, 1);
+        flush_all(FLUSH_CACHE);
 }
 
 static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1301,11 +1301,6 @@ void vm_resume_fail(void)
     domain_crash_synchronous();
 }
 
-static void wbinvd_ipi(void *info)
-{
-    wbinvd();
-}
-
 void vmx_do_resume(struct vcpu *v)
 {
     bool_t debug_state;
@@ -1332,7 +1327,7 @@ void vmx_do_resume(struct vcpu *v)
         {
             int cpu = v->arch.hvm_vmx.active_cpu;
             if ( cpu != -1 )
-                on_selected_cpus(cpumask_of(cpu), wbinvd_ipi, NULL, 1);
+                flush_mask(cpumask_of(cpu), FLUSH_CACHE);
         }
 
         vmx_clear_vmcs(v);
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2307,18 +2307,13 @@ static void vmx_do_extint(struct cpu_use
     do_IRQ(regs);
 }
 
-static void wbinvd_ipi(void *info)
-{
-    wbinvd();
-}
-
 static void vmx_wbinvd_intercept(void)
 {
     if ( !cache_flush_permitted(current->domain) || iommu_snoop )
         return;
 
     if ( cpu_has_wbinvd_exiting )
-        on_each_cpu(wbinvd_ipi, NULL, 1);
+        flush_all(FLUSH_CACHE);
     else
         wbinvd();
 }
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 
