[Xen-devel] [PATCH 1 of 2] x86/mm: When mem event automatically promotes access rights, let other subsystems know
 xen/arch/x86/hvm/hvm.c    |  45 ++++++++++++++++++++++++++++++++++++++-------
 xen/arch/x86/mm/p2m.c     |   8 +++++---
 xen/include/asm-x86/p2m.h |   9 +++++----
 3 files changed, 48 insertions(+), 14 deletions(-)
The mem event fault handler in the p2m can automatically promote the access
rights of a p2m entry. In those scenarios, vcpus are not paused and will
immediately retry the faulting instruction. This generates a second fault
if the underlying entry type requires one (paging, unsharing, PoD, etc.).
Collapse the two faults into a single one.
Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
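
To illustrate the new calling convention: p2m_mem_access_check() now reports
whether it promoted the rights itself, so the fault handler can finish
servicing the same fault in one pass instead of letting the retried
instruction fault a second time. A self-contained toy sketch of that contract
(not Xen code; all toy_* names are hypothetical):

    /* Self-contained toy, not Xen code: toy_* names are hypothetical and
     * only mimic the contract the patch gives p2m_mem_access_check(). */
    #include <stdbool.h>
    #include <stdio.h>

    /* Returns false when an event was sent and the vcpu was paused;
     * true when rights were promoted in place and the vcpu runs on. */
    static bool toy_access_check(bool listener_present)
    {
        if ( !listener_present )
            return true;            /* promote rights, no pause */
        return false;               /* queue event, pause vcpu */
    }

    static int toy_fault_handler(bool listener_present, bool access_w,
                                 bool page_is_shared)
    {
        if ( !toy_access_check(listener_present) )
            return 1;               /* vcpu paused, work here is done */

        /* Rights promoted: classify the same fault further, so e.g. an
         * unshare happens now instead of on a second, retried fault. */
        if ( access_w && page_is_shared )
            printf("unsharing on the first and only fault\n");
        return 1;
    }

    int main(void)
    {
        return toy_fault_handler(false, true, true) == 1 ? 0 : 1;
    }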
diff -r 29701f5bdd84 -r d6354df726a0 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1278,9 +1278,13 @@ int hvm_hap_nested_page_fault(unsigned l
 
         if ( violation )
         {
-            p2m_mem_access_check(gpa, gla_valid, gla, access_r, access_w, access_x);
-            rc = 1;
-            goto out_put_gfn;
+            if ( !p2m_mem_access_check(gpa, gla_valid, gla, access_r,
+                                       access_w, access_x) )
+            {
+                /* Rights not promoted, vcpu paused, work here is done */
+                rc = 1;
+                goto out_put_gfn;
+            }
         }
     }
@@ -1288,7 +1292,8 @@ int hvm_hap_nested_page_fault(unsigned l
      * If this GFN is emulated MMIO or marked as read-only, pass the fault
      * to the mmio handler.
      */
-    if ( (p2mt == p2m_mmio_dm) || (p2mt == p2m_ram_ro) )
+    if ( (p2mt == p2m_mmio_dm) ||
+         (access_w && (p2mt == p2m_ram_ro)) )
     {
         if ( !handle_mmio() )
             hvm_inject_exception(TRAP_gp_fault, 0, 0);
@@ -1302,7 +1307,7 @@ int hvm_hap_nested_page_fault(unsigned l
         p2m_mem_paging_populate(v->domain, gfn);
 
     /* Mem sharing: unshare the page and try again */
-    if ( p2mt == p2m_ram_shared )
+    if ( access_w && (p2mt == p2m_ram_shared) )
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
         mem_sharing_unshare_page(p2m->domain, gfn, 0);
@@ -1319,14 +1324,15 @@ int hvm_hap_nested_page_fault(unsigned l
          * a large page, we do not change other pages type within that large
          * page.
          */
-        paging_mark_dirty(v->domain, mfn_x(mfn));
+        if ( access_w )
+            paging_mark_dirty(v->domain, mfn_x(mfn));
         p2m_change_type(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
         rc = 1;
         goto out_put_gfn;
     }
 
     /* Shouldn't happen: Maybe the guest was writing to a r/o grant mapping? */
-    if ( p2mt == p2m_grant_map_ro )
+    if ( access_w && (p2mt == p2m_grant_map_ro) )
     {
         gdprintk(XENLOG_WARNING,
                  "trying to write to read-only grant mapping\n");
@@ -1335,6 +1341,31 @@ int hvm_hap_nested_page_fault(unsigned l
         goto out_put_gfn;
     }
 
+    if ( access_x && (p2m_is_grant(p2mt)) )
+    {
+        gdprintk(XENLOG_WARNING,
+                 "trying to execute a grant mapping\n");
+        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        rc = 1;
+        goto out_put_gfn;
+    }
+
+    if ( p2m_is_grant(p2mt) )
+    {
+        /* If we haven't caught this by now, then it's a valid access */
+        rc = 1;
+        goto out_put_gfn;
+    }
+
+    if ( p2mt == p2m_mmio_direct )
+    {
+        if ( !(access_w &&
+               rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn))) ) {
+            rc = 1;
+            goto out_put_gfn;
+        }
+    }
+
     rc = 0;
 out_put_gfn:
     put_gfn(p2m->domain, gfn);
diff -r 29701f5bdd84 -r d6354df726a0 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1126,7 +1126,7 @@ void p2m_mem_paging_resume(struct domain
     mem_event_unpause_vcpus(d, &d->mem_paging);
 }
 
-void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
+int p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
                           bool_t access_r, bool_t access_w, bool_t access_x)
 {
     struct vcpu *v = current;
@@ -1146,7 +1146,7 @@ void p2m_mem_access_check(unsigned long
     {
         p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
         p2m_unlock(p2m);
-        return;
+        return 1;
     }
     p2m_unlock(p2m);
@@ -1166,9 +1166,10 @@ void p2m_mem_access_check(unsigned long
             p2m_lock(p2m);
             p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
             p2m_unlock(p2m);
+            return 1;
         }
 
-        return;
+        return 0;
     }
 
     memset(&req, 0, sizeof(req));
@@ -1192,6 +1193,7 @@ void p2m_mem_access_check(unsigned long
     (void)mem_event_put_request(d, &d->mem_access, &req);
 
     /* VCPU paused */
+    return 0;
 }
 
 void p2m_mem_access_resume(struct domain *d)
diff -r 29701f5bdd84 -r d6354df726a0 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -491,8 +491,9 @@ static inline void p2m_mem_paging_popula
 #ifdef __x86_64__
 /* Send mem event based on the access (gla is -1ull if not available). Handles
- * the rw2rx conversion */
-void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
+ * the rw2rx conversion. Return value indicates whether access rights have been
+ * promoted with no underlying vcpu pause. */
+int p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
                          bool_t access_r, bool_t access_w, bool_t access_x);
 /* Resumes the running of the VCPU, restarting the last instruction */
 void p2m_mem_access_resume(struct domain *d);
@@ -508,10 +509,10 @@ int p2m_get_mem_access(struct domain *d,
                               hvmmem_access_t *access);
 
 #else
 
-static inline void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid,
+static inline int p2m_mem_access_check(unsigned long gpa, bool_t gla_valid,
                                         unsigned long gla, bool_t access_r,
                                         bool_t access_w, bool_t access_x)
-{ }
+{ return 1; }
 static inline int p2m_set_mem_access(struct domain *d,
                                      unsigned long start_pfn,
                                      uint32_t nr, hvmmem_access_t access)
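
For reference, the return-value contract condenses to: the in-place promotions
visible in the two p2m.c hunks (to p2m_access_rw, and to p2m_access_rwx when
there is nobody to notify) return 1 with the vcpu still running, while the
event path queues a request, pauses the vcpu, and returns 0. A toy
condensation under those assumptions (not Xen code; every identifier here is
made up, and additional pause conditions in the real function are elided):

    /* Toy condensation of the return-value contract of the patched
     * p2m_mem_access_check(); not Xen code, all names are hypothetical. */
    #include <stdbool.h>
    #include <stdio.h>

    /* 1: entry promoted in place (p2m_access_rw or p2m_access_rwx in the
     *    real code); the vcpu keeps running and retries successfully.
     * 0: a mem_event request was queued and the vcpu was paused. */
    static int toy_check(bool write_promotable, bool listener_present)
    {
        if ( write_promotable )
            return 1;               /* promote entry for the write */
        if ( !listener_present )
            return 1;               /* no one to notify: clear restrictions */
        return 0;                   /* queue request; vcpu paused */
    }

    int main(void)
    {
        printf("promoted: %d\n", toy_check(true, true));
        printf("paused:   %d\n", toy_check(false, true));
        return 0;
    }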
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel