|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 2/5] x86/pv: Rename invalidate_shadow_ldt() to pv_destroy_ldt()
and move it into pv/descriptor-tables.c beside its GDT counterpart. Reduce
the !in_irq() check from a BUG_ON() to ASSERT().
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
---
xen/arch/x86/mm.c | 51 ++++---------------------------------
xen/arch/x86/pv/descriptor-tables.c | 42 ++++++++++++++++++++++++++++--
xen/include/asm-x86/pv/mm.h | 3 +++
3 files changed, 48 insertions(+), 48 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index a56f875..14cfa93 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -125,6 +125,7 @@
#include <asm/hvm/grant_table.h>
#include <asm/pv/grant_table.h>
+#include <asm/pv/mm.h>
#include "pv/mm.h"
@@ -544,48 +545,6 @@ static inline void set_tlbflush_timestamp(struct page_info
*page)
const char __section(".bss.page_aligned.const") __aligned(PAGE_SIZE)
zero_page[PAGE_SIZE];
-/*
- * Flush the LDT, dropping any typerefs. Returns a boolean indicating whether
- * mappings have been removed (i.e. a TLB flush is needed).
- */
-static bool invalidate_shadow_ldt(struct vcpu *v)
-{
- l1_pgentry_t *pl1e;
- unsigned int i, mappings_dropped = 0;
- struct page_info *page;
-
- BUG_ON(unlikely(in_irq()));
-
- spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
-
- if ( v->arch.pv_vcpu.shadow_ldt_mapcnt == 0 )
- goto out;
-
- pl1e = pv_ldt_ptes(v);
-
- for ( i = 0; i < 16; i++ )
- {
- if ( !(l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) )
- continue;
-
- page = l1e_get_page(pl1e[i]);
- l1e_write(&pl1e[i], l1e_empty());
- mappings_dropped++;
-
- ASSERT_PAGE_IS_TYPE(page, PGT_seg_desc_page);
- ASSERT_PAGE_IS_DOMAIN(page, v->domain);
- put_page_and_type(page);
- }
-
- ASSERT(v->arch.pv_vcpu.shadow_ldt_mapcnt == mappings_dropped);
- v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;
-
- out:
- spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
-
- return mappings_dropped;
-}
-
static int alloc_segdesc_page(struct page_info *page)
{
@@ -1242,7 +1201,7 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
{
for_each_vcpu ( pg_owner, v )
{
- if ( invalidate_shadow_ldt(v) )
+ if ( pv_destroy_ldt(v) )
flush_tlb_mask(v->vcpu_dirty_cpumask);
}
}
@@ -2825,7 +2784,7 @@ int new_guest_cr3(mfn_t mfn)
return rc;
}
- invalidate_shadow_ldt(curr); /* Unconditional TLB flush later. */
+ pv_destroy_ldt(curr); /* Unconditional TLB flush later. */
write_ptbase(curr);
return 0;
@@ -2861,7 +2820,7 @@ int new_guest_cr3(mfn_t mfn)
return rc;
}
- invalidate_shadow_ldt(curr); /* Unconditional TLB flush later. */
+ pv_destroy_ldt(curr); /* Unconditional TLB flush later. */
if ( !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
fill_ro_mpt(mfn);
@@ -3368,7 +3327,7 @@ long do_mmuext_op(
else if ( (curr->arch.pv_vcpu.ldt_ents != ents) ||
(curr->arch.pv_vcpu.ldt_base != ptr) )
{
- if ( invalidate_shadow_ldt(curr) )
+ if ( pv_destroy_ldt(curr) )
flush_tlb_local();
curr->arch.pv_vcpu.ldt_base = ptr;
diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c
index d1c4296..b418bbb 100644
--- a/xen/arch/x86/pv/descriptor-tables.c
+++ b/xen/arch/x86/pv/descriptor-tables.c
@@ -31,9 +31,47 @@
#undef page_to_mfn
#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-/*******************
- * Descriptor Tables
- */
+/*
+ * Flush the LDT, dropping any typerefs. Returns a boolean indicating whether
+ * mappings have been removed (i.e. a TLB flush is needed).
+ */
+bool pv_destroy_ldt(struct vcpu *v)
+{
+ l1_pgentry_t *pl1e;
+ unsigned int i, mappings_dropped = 0;
+ struct page_info *page;
+
+ ASSERT(!in_irq());
+
+ spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
+
+ if ( v->arch.pv_vcpu.shadow_ldt_mapcnt == 0 )
+ goto out;
+
+ pl1e = pv_ldt_ptes(v);
+
+ for ( i = 0; i < 16; i++ )
+ {
+ if ( !(l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) )
+ continue;
+
+ page = l1e_get_page(pl1e[i]);
+ l1e_write(&pl1e[i], l1e_empty());
+ mappings_dropped++;
+
+ ASSERT_PAGE_IS_TYPE(page, PGT_seg_desc_page);
+ ASSERT_PAGE_IS_DOMAIN(page, v->domain);
+ put_page_and_type(page);
+ }
+
+ ASSERT(v->arch.pv_vcpu.shadow_ldt_mapcnt == mappings_dropped);
+ v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;
+
+ out:
+ spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
+
+ return mappings_dropped;
+}
void pv_destroy_gdt(struct vcpu *v)
{
diff --git a/xen/include/asm-x86/pv/mm.h b/xen/include/asm-x86/pv/mm.h
index 5d2fe4c..246b990 100644
--- a/xen/include/asm-x86/pv/mm.h
+++ b/xen/include/asm-x86/pv/mm.h
@@ -29,6 +29,7 @@ long pv_set_gdt(struct vcpu *v, unsigned long *frames, unsigned int entries);
void pv_destroy_gdt(struct vcpu *v);
bool pv_map_ldt_shadow_page(unsigned int off);
+bool pv_destroy_ldt(struct vcpu *v);
#else
@@ -48,6 +49,8 @@ static inline long pv_set_gdt(struct vcpu *v, unsigned long *frames,
static inline void pv_destroy_gdt(struct vcpu *v) { ASSERT_UNREACHABLE(); }
static inline bool pv_map_ldt_shadow_page(unsigned int off) { return false; }
+static inline bool pv_destroy_ldt(struct vcpu *v)
+{ ASSERT_UNREACHABLE(); return false; }
#endif
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |