[RFC PATCH 2/4] x86/shadow: Replace guest_flush_tlb_mask with sh_flush_tlb_mask
Introduce sh_flush_tlb_{mask,local} helpers for flushing the TLB from within
the shadow paging code. This decouples the shadow code from the more general
guest_flush_tlb_mask() / guest_flush_tlb_flags() helpers.
No functional change intended.
Signed-off-by: Teddy Astie <teddy.astie@xxxxxxxxxx>
---
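Reviewer note: within shadow code the new helpers are meant as drop-in
replacements; a call site that used to do guest_flush_tlb_mask(d,
d->dirty_cpumask) now does sh_flush_tlb_mask(d, d->dirty_cpumask), with the
flush flags built locally (FLUSH_TLB, plus FLUSH_HVM_ASID_CORE for HVM
domains) rather than taken from the generic guest TLB flush helpers. A
minimal standalone sketch of that flag selection follows (illustrative flag
values and helper name, not Xen code):

/*
 * Standalone model of the flag selection done by sh_flush_tlb_{mask,local}:
 * always FLUSH_TLB, plus FLUSH_HVM_ASID_CORE when the domain is HVM.
 * Flag values and the sh_flush_flags() name are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define FLUSH_TLB            (1u << 0)  /* illustrative value */
#define FLUSH_HVM_ASID_CORE  (1u << 1)  /* illustrative value */

static unsigned int sh_flush_flags(bool hvm)
{
    unsigned int flags = FLUSH_TLB;

    if ( hvm )
        flags |= FLUSH_HVM_ASID_CORE;

    return flags;
}

int main(void)
{
    printf("PV domain:  %#x\n", sh_flush_flags(false)); /* FLUSH_TLB only */
    printf("HVM domain: %#x\n", sh_flush_flags(true));  /* + ASID flush */
    return 0;
}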
xen/arch/x86/mm/shadow/common.c | 12 ++++++------
xen/arch/x86/mm/shadow/hvm.c | 8 ++++----
xen/arch/x86/mm/shadow/multi.c | 18 ++++++------------
xen/arch/x86/mm/shadow/private.h | 22 ++++++++++++++++++++++
4 files changed, 38 insertions(+), 22 deletions(-)
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 0176e33bc9..8511da5c7f 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -421,7 +421,7 @@ static int oos_remove_write_access(struct vcpu *v, mfn_t gmfn,
}
if ( ftlb )
- guest_flush_tlb_mask(d, d->dirty_cpumask);
+ sh_flush_tlb_mask(d, d->dirty_cpumask);
return 0;
}
@@ -969,7 +969,7 @@ static bool __must_check _shadow_prealloc(struct domain *d, unsigned int pages)
/* See if that freed up enough space */
if ( d->arch.paging.free_pages >= pages )
{
- guest_flush_tlb_mask(d, d->dirty_cpumask);
+ sh_flush_tlb_mask(d, d->dirty_cpumask);
return true;
}
}
@@ -984,7 +984,7 @@ static bool __must_check _shadow_prealloc(struct domain *d, unsigned int pages)
ASSERT_UNREACHABLE();
- guest_flush_tlb_mask(d, d->dirty_cpumask);
+ sh_flush_tlb_mask(d, d->dirty_cpumask);
return false;
}
@@ -1052,7 +1052,7 @@ void shadow_blow_tables(struct domain *d)
0);
/* Make sure everyone sees the unshadowings */
- guest_flush_tlb_mask(d, d->dirty_cpumask);
+ sh_flush_tlb_mask(d, d->dirty_cpumask);
}
void shadow_blow_tables_per_domain(struct domain *d)
@@ -1157,7 +1157,7 @@ mfn_t shadow_alloc(struct domain *d,
if ( unlikely(!cpumask_empty(&mask)) )
{
perfc_incr(shadow_alloc_tlbflush);
- guest_flush_tlb_mask(d, &mask);
+ sh_flush_tlb_mask(d, &mask);
}
/* Now safe to clear the page for reuse */
clear_domain_page(page_to_mfn(sp));
@@ -2276,7 +2276,7 @@ void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all)
/* Need to flush TLBs now, so that linear maps are safe next time we
* take a fault. */
- guest_flush_tlb_mask(d, d->dirty_cpumask);
+ sh_flush_tlb_mask(d, d->dirty_cpumask);
paging_unlock(d);
}
diff --git a/xen/arch/x86/mm/shadow/hvm.c b/xen/arch/x86/mm/shadow/hvm.c
index 114957a3e1..b558ed82e8 100644
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -594,7 +594,7 @@ static void validate_guest_pt_write(struct vcpu *v, mfn_t gmfn,
if ( rc & SHADOW_SET_FLUSH )
/* Need to flush TLBs to pick up shadow PT changes */
- guest_flush_tlb_mask(d, d->dirty_cpumask);
+ sh_flush_tlb_mask(d, d->dirty_cpumask);
if ( rc & SHADOW_SET_ERROR )
{
@@ -744,7 +744,7 @@ bool cf_check shadow_flush_tlb(const unsigned long *vcpu_bitmap)
}
/* Flush TLBs on all CPUs with dirty vcpu state. */
- guest_flush_tlb_mask(d, mask);
+ sh_flush_tlb_mask(d, mask);
/* Done. */
for_each_vcpu ( d, v )
@@ -978,7 +978,7 @@ static void cf_check sh_unshadow_for_p2m_change(
}
if ( flush )
- guest_flush_tlb_mask(d, d->dirty_cpumask);
+ sh_flush_tlb_mask(d, d->dirty_cpumask);
}
#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
@@ -1196,7 +1196,7 @@ int shadow_track_dirty_vram(struct domain *d,
}
}
if ( flush_tlb )
- guest_flush_tlb_mask(d, d->dirty_cpumask);
+ sh_flush_tlb_mask(d, d->dirty_cpumask);
goto out;
out_sl1ma:
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 03be61e225..3924ff4da6 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -79,12 +79,6 @@ const char *const fetch_type_names[] = {
# define for_each_shadow_table(v, i) for ( (i) = 0; (i) < 1; ++(i) )
#endif
-/* Helper to perform a local TLB flush. */
-static void sh_flush_local(const struct domain *d)
-{
- flush_local(guest_flush_tlb_flags(d));
-}
-
#if GUEST_PAGING_LEVELS >= 4 && defined(CONFIG_PV32)
#define ASSERT_VALID_L2(t) \
ASSERT((t) == SH_type_l2_shadow || (t) == SH_type_l2h_shadow)
@@ -2429,7 +2423,7 @@ static int cf_check sh_page_fault(
perfc_incr(shadow_rm_write_flush_tlb);
smp_wmb();
atomic_inc(&d->arch.paging.shadow.gtable_dirty_version);
- guest_flush_tlb_mask(d, d->dirty_cpumask);
+ sh_flush_tlb_mask(d, d->dirty_cpumask);
}
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -3243,7 +3237,7 @@ static pagetable_t cf_check sh_update_cr3(struct vcpu *v, bool noflush)
* (old) shadow linear maps in the writeable mapping heuristics. */
#if GUEST_PAGING_LEVELS == 4
if ( sh_remove_write_access(d, gmfn, 4, 0) != 0 )
- guest_flush_tlb_mask(d, d->dirty_cpumask);
+ sh_flush_tlb_mask(d, d->dirty_cpumask);
old_entry = sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow,
sh_make_shadow);
if ( unlikely(pagetable_is_null(v->arch.paging.shadow.shadow_table[0])) )
@@ -3284,7 +3278,7 @@ static pagetable_t cf_check sh_update_cr3(struct vcpu *v, bool noflush)
}
}
if ( flush )
- guest_flush_tlb_mask(d, d->dirty_cpumask);
+ sh_flush_tlb_mask(d, d->dirty_cpumask);
/* Now install the new shadows. */
for ( i = 0; i < 4; i++ )
{
@@ -3309,7 +3303,7 @@ static pagetable_t cf_check sh_update_cr3(struct vcpu *v, bool noflush)
}
#elif GUEST_PAGING_LEVELS == 2
if ( sh_remove_write_access(d, gmfn, 2, 0) != 0 )
- guest_flush_tlb_mask(d, d->dirty_cpumask);
+ sh_flush_tlb_mask(d, d->dirty_cpumask);
old_entry = sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l2_shadow,
sh_make_shadow);
ASSERT(pagetable_is_null(old_entry));
@@ -3747,7 +3741,7 @@ static void cf_check sh_pagetable_dying(paddr_t gpa)
}
}
if ( flush )
- guest_flush_tlb_mask(d, d->dirty_cpumask);
+ sh_flush_tlb_mask(d, d->dirty_cpumask);
/* Remember that we've seen the guest use this interface, so we
* can rely on it using it in future, instead of guessing at
@@ -3786,7 +3780,7 @@ static void cf_check sh_pagetable_dying(paddr_t gpa)
mfn_to_page(gmfn)->pagetable_dying = true;
shadow_unhook_mappings(d, smfn, 1/* user pages only */);
/* Now flush the TLB: we removed toplevel mappings. */
- guest_flush_tlb_mask(d, d->dirty_cpumask);
+ sh_flush_tlb_mask(d, d->dirty_cpumask);
}
/* Remember that we've seen the guest use this interface, so we
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index cef9dbef2e..565a334bc0 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -15,6 +15,7 @@
// been included...
#include <asm/page.h>
#include <xen/domain_page.h>
+#include <asm/flushtlb.h>
#include <asm/x86_emulate.h>
#include <asm/hvm/support.h>
#include <asm/atomic.h>
@@ -910,6 +911,27 @@ static inline int sh_check_page_has_no_refs(struct page_info *page)
((count & PGC_allocated) ? 1 : 0) );
}
+/* Helper to perform a local TLB flush. */
+static inline void sh_flush_local(const struct domain *d)
+{
+    unsigned int flags = FLUSH_TLB;
+
+    if ( is_hvm_domain(d) )
+        flags |= FLUSH_HVM_ASID_CORE;
+
+    flush_local(flags);
+}
+
+static inline void sh_flush_tlb_mask(const struct domain *d, const cpumask_t *mask)
+{
+    unsigned int flags = FLUSH_TLB;
+
+    if ( is_hvm_domain(d) )
+        flags |= FLUSH_HVM_ASID_CORE;
+
+    flush_mask(mask, flags);
+}
+
/* Flush the TLB of the selected vCPUs. */
bool cf_check shadow_flush_tlb(const unsigned long *vcpu_bitmap);
--
2.51.2
--
Teddy Astie | Vates XCP-ng Developer
XCP-ng & Xen Orchestra - Vates solutions
web: https://vates.tech