[Xen-devel] [PATCH 2/2] RFC: x86/mm: conditionally check page_lock/page_unlock ownership
Commit cf4b30dca0a ("Add debug code to detect illegal page_lock and put_page_type
ordering") added extra sanity checking to page_lock/page_unlock for debug builds,
on the assumption that no hypervisor path ever holds more than one page lock at a
time. This assumption does not hold during memory sharing, which locks two pages
at once.
This is only an RFC, as I currently have no better idea of how to resolve this
issue while also keeping the sanity check in place.
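
For illustration, a minimal sketch (not the actual code) of the double-lock
pattern in mem_sharing.c (e.g. in share_pages()) that trips the ASSERT();
error handling, auditing and rmap management are elided:

    /* Sketch only: the source and the client page are locked at the
     * same time, violating the debug-build assumption that each path
     * holds at most one page lock. */
    if ( !mem_sharing_page_lock(spage) )
        goto err_out;
    if ( !mem_sharing_page_lock(cpage) )
    {
        mem_sharing_page_unlock(spage);
        goto err_out;
    }

    /* ... merge rmaps and free the client page ... */

    mem_sharing_page_unlock(cpage);
    mem_sharing_page_unlock(spage);

This is why mem_sharing_page_lock/unlock below pass lock_check == 0, while all
pv pte-serialization callers keep the check enabled.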
Signed-off-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
xen/arch/x86/domain.c | 4 ++--
xen/arch/x86/mm.c | 20 +++++++++++---------
xen/arch/x86/mm/mem_sharing.c | 4 ++--
xen/arch/x86/pv/grant_table.c | 12 ++++++------
xen/arch/x86/pv/ro-page-fault.c | 6 +++---
xen/include/asm-x86/mm.h | 4 ++--
6 files changed, 26 insertions(+), 24 deletions(-)
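
The resulting calling convention, as a short sketch of hypothetical callers
mirroring the hunks below:

    /* pv pte-update paths keep the single-lock sanity check: */
    if ( page_lock(page, 1) )
    {
        /* ... update the pte ... */
        page_unlock(page, 1);
    }

    /* memory sharing opts out, as it holds two page locks at once: */
    if ( page_lock(pg, 0) )
    {
        /* ... */
        page_unlock(pg, 0);
    }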
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 8d579e2cf9..93cda1ccdd 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -995,13 +995,13 @@ int arch_set_info_guest(
{
struct page_info *page = page_list_remove_head(&d->page_list);
- if ( page_lock(page) )
+ if ( page_lock(page, 1) )
{
if ( (page->u.inuse.type_info & PGT_type_mask) ==
PGT_l4_page_table )
done = !fill_ro_mpt(page_to_mfn(page));
- page_unlock(page);
+ page_unlock(page, 1);
}
page_list_add_tail(page, &d->page_list);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index a88cd9ce7c..ff734e362c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2030,11 +2030,12 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
#define current_locked_page_ne_check(x) true
#endif
-int page_lock(struct page_info *page)
+int page_lock(struct page_info *page, bool lock_check)
{
unsigned long x, nx;
- ASSERT(current_locked_page_check(NULL));
+ if ( lock_check )
+ ASSERT(current_locked_page_check(NULL));
do {
while ( (x = page->u.inuse.type_info) & PGT_locked )
@@ -2051,11 +2052,12 @@ int page_lock(struct page_info *page)
return 1;
}
-void page_unlock(struct page_info *page)
+void page_unlock(struct page_info *page, bool lock_check)
{
unsigned long x, nx, y = page->u.inuse.type_info;
- ASSERT(current_locked_page_check(page));
+ if ( lock_check )
+ ASSERT(current_locked_page_check(page));
do {
x = y;
@@ -3897,7 +3899,7 @@ long do_mmu_update(
}
va = _p(((unsigned long)va & PAGE_MASK) + (req.ptr & ~PAGE_MASK));
- if ( page_lock(page) )
+ if ( page_lock(page, 1) )
{
switch ( page->u.inuse.type_info & PGT_type_mask )
{
@@ -3954,7 +3956,7 @@ long do_mmu_update(
rc = 0;
break;
}
- page_unlock(page);
+ page_unlock(page, 1);
if ( rc == -EINTR )
rc = -ERESTART;
}
@@ -4247,7 +4249,7 @@ static int __do_update_va_mapping(
if ( unlikely(!gl1pg) )
goto out;
- if ( !page_lock(gl1pg) )
+ if ( !page_lock(gl1pg, 1) )
{
put_page(gl1pg);
goto out;
@@ -4255,7 +4257,7 @@ static int __do_update_va_mapping(
if ( (gl1pg->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
{
- page_unlock(gl1pg);
+ page_unlock(gl1pg, 1);
put_page(gl1pg);
goto out;
}
@@ -4263,7 +4265,7 @@ static int __do_update_va_mapping(
rc = mod_l1_entry(pl1e, val, mfn_x(gl1mfn), MMU_NORMAL_PT_UPDATE, v,
pg_owner);
- page_unlock(gl1pg);
+ page_unlock(gl1pg, 1);
put_page(gl1pg);
out:
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 345a1778f9..777af7f7c7 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -118,7 +118,7 @@ static inline int mem_sharing_page_lock(struct page_info *pg)
pg_lock_data_t *pld = &(this_cpu(__pld));
page_sharing_mm_pre_lock();
- rc = page_lock(pg);
+ rc = page_lock(pg, 0);
if ( rc )
{
preempt_disable();
@@ -135,7 +135,7 @@ static inline void mem_sharing_page_unlock(struct page_info *pg)
page_sharing_mm_unlock(pld->mm_unlock_level,
&pld->recurse_count);
preempt_enable();
- page_unlock(pg);
+ page_unlock(pg, 0);
}
static inline shr_handle_t get_next_handle(void)
diff --git a/xen/arch/x86/pv/grant_table.c b/xen/arch/x86/pv/grant_table.c
index 5180334f42..be9bbe7c4c 100644
--- a/xen/arch/x86/pv/grant_table.c
+++ b/xen/arch/x86/pv/grant_table.c
@@ -101,7 +101,7 @@ int create_grant_pv_mapping(uint64_t addr, mfn_t frame,
goto out_unmap;
}
- if ( !page_lock(page) )
+ if ( !page_lock(page, 1) )
goto out_put;
if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
@@ -112,7 +112,7 @@ int create_grant_pv_mapping(uint64_t addr, mfn_t frame,
rc = GNTST_okay;
out_unlock:
- page_unlock(page);
+ page_unlock(page, 1);
out_put:
put_page(page);
out_unmap:
@@ -158,7 +158,7 @@ static bool steal_linear_address(unsigned long linear, l1_pgentry_t *out)
if ( !page )
goto out_unmap;
- if ( !page_lock(page) )
+ if ( !page_lock(page, 1) )
goto out_put;
if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
@@ -171,7 +171,7 @@ static bool steal_linear_address(unsigned long linear, l1_pgentry_t *out)
*out = ol1e;
out_unlock:
- page_unlock(page);
+ page_unlock(page, 1);
out_put:
put_page(page);
out_unmap:
@@ -264,7 +264,7 @@ int replace_grant_pv_mapping(uint64_t addr, mfn_t frame,
goto out_unmap;
}
- if ( !page_lock(page) )
+ if ( !page_lock(page, 1) )
goto out_put;
if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
@@ -297,7 +297,7 @@ int replace_grant_pv_mapping(uint64_t addr, mfn_t frame,
rc = GNTST_okay;
out_unlock:
- page_unlock(page);
+ page_unlock(page, 1);
out_put:
put_page(page);
out_unmap:
diff --git a/xen/arch/x86/pv/ro-page-fault.c b/xen/arch/x86/pv/ro-page-fault.c
index e7a7179dda..8e90e82f0b 100644
--- a/xen/arch/x86/pv/ro-page-fault.c
+++ b/xen/arch/x86/pv/ro-page-fault.c
@@ -277,7 +277,7 @@ static int ptwr_do_page_fault(struct x86_emulate_ctxt *ctxt,
if ( !page )
return X86EMUL_UNHANDLEABLE;
- if ( !page_lock(page) )
+ if ( !page_lock(page, 1) )
{
put_page(page);
return X86EMUL_UNHANDLEABLE;
@@ -285,7 +285,7 @@ static int ptwr_do_page_fault(struct x86_emulate_ctxt *ctxt,
if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
{
- page_unlock(page);
+ page_unlock(page, 1);
put_page(page);
return X86EMUL_UNHANDLEABLE;
}
@@ -293,7 +293,7 @@ static int ptwr_do_page_fault(struct x86_emulate_ctxt *ctxt,
ctxt->data = &ptwr_ctxt;
rc = x86_emulate(ctxt, &ptwr_emulate_ops);
- page_unlock(page);
+ page_unlock(page, 1);
put_page(page);
return rc;
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 6faa563167..dae52e8880 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -375,8 +375,8 @@ const struct platform_bad_page *get_platform_badpages(unsigned int *array_size);
* These two users (pte serialization and memory sharing) do not collide, since
* sharing is only supported for hvm guests, which do not perform pv pte updates.
*/
-int page_lock(struct page_info *page);
-void page_unlock(struct page_info *page);
+int page_lock(struct page_info *page, bool lock_check);
+void page_unlock(struct page_info *page, bool lock_check);
void put_page_type(struct page_info *page);
int get_page_type(struct page_info *page, unsigned long type);
--
2.20.1