[Xen-devel] [PATCH v6 3/9] x86/mm: add disallow_mask parameter to get_page_from_l1e
This will make moving the PV mm code easier. To retain the same behaviour,
the base mask is copied into the shadow code.
No functional change.
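
For reference (not part of the patch), below is a simplified sketch of how the
mask that the shadow code now open-codes corresponds to the HVM variant of the
disallow mask derived in arch/x86/mm.c. The flag macros are the existing Xen
definitions; init_base_disallow_mask and HVM_L1_DISALLOW_MASK are illustrative
names only, and boot-time adjustments (e.g. for NX availability) are elided:

    /* Illustrative only: boot-time base mask, set up once in mm.c.
     * Everything a guest may legitimately set in an L1e is allowed. */
    static uint32_t base_disallow_mask;

    static void init_base_disallow_mask(void)
    {
        base_disallow_mask =
            ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED |
              _PAGE_DIRTY | _PAGE_AVAIL | _PAGE_AVAIL_HIGH | _PAGE_NX);
    }

    /* L1 entries: the grant-table flag stays disallowed, the global bit is
     * permitted. */
    #define L1_DISALLOW_MASK \
        ((base_disallow_mask | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)

    /* HVM (shadow) callers additionally tolerate guest-chosen cache
     * attributes, matching the mask built in shadow_get_page_from_l1e(). */
    #define HVM_L1_DISALLOW_MASK (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS)
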
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
xen/arch/x86/mm.c | 13 +++++++------
xen/arch/x86/mm/shadow/multi.c | 15 ++++++++++++---
xen/arch/x86/pv/ro-page-fault.c | 2 +-
xen/include/asm-x86/mm.h | 3 ++-
4 files changed, 22 insertions(+), 11 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index bfa0a6436c..53212bcce3 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -831,7 +831,8 @@ static int print_mmio_emul_range(unsigned long s, unsigned long e, void *arg)
*/
int
get_page_from_l1e(
- l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner)
+ l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner,
+ uint32_t disallow_mask)
{
unsigned long mfn = l1e_get_pfn(l1e);
struct page_info *page = mfn_to_page(_mfn(mfn));
@@ -843,10 +844,9 @@ get_page_from_l1e(
if ( !(l1f & _PAGE_PRESENT) )
return 0;
- if ( unlikely(l1f & l1_disallow_mask(l1e_owner)) )
+ if ( unlikely(l1f & disallow_mask) )
{
- gdprintk(XENLOG_WARNING, "Bad L1 flags %x\n",
- l1f & l1_disallow_mask(l1e_owner));
+ gdprintk(XENLOG_WARNING, "Bad L1 flags %x\n", l1f & disallow_mask);
return -EINVAL;
}
@@ -1318,7 +1318,7 @@ static int alloc_l1_table(struct page_info *page)
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
{
- switch ( ret = get_page_from_l1e(pl1e[i], d, d) )
+ switch ( ret = get_page_from_l1e(pl1e[i], d, d, l1_disallow_mask(d)) )
{
default:
goto fail;
@@ -1957,7 +1957,8 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
return rc ? 0 : -EBUSY;
}
- switch ( rc = get_page_from_l1e(nl1e, pt_dom, pg_dom) )
+ switch ( rc = get_page_from_l1e(nl1e, pt_dom, pg_dom,
+ l1_disallow_mask(pt_dom)) )
{
default:
if ( page )
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index a6372e3a02..02c2198c9b 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -858,13 +858,21 @@ shadow_get_page_from_l1e(shadow_l1e_t sl1e, struct domain *d, p2m_type_t type)
int res;
mfn_t mfn;
struct domain *owner;
+ /* The disallow mask is taken from arch/x86/mm.c for HVM guest */
+ uint32_t disallow_mask =
+ ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED |
+ _PAGE_DIRTY | _PAGE_AVAIL | _PAGE_AVAIL_HIGH | _PAGE_NX);
+ disallow_mask = (disallow_mask | _PAGE_GNTTAB) & ~_PAGE_GLOBAL;
+ disallow_mask &= ~PAGE_CACHE_ATTRS;
+
+ ASSERT(is_hvm_domain(d));
ASSERT(!sh_l1e_is_magic(sl1e));
if ( !shadow_mode_refcounts(d) )
return 1;
- res = get_page_from_l1e(sl1e, d, d);
+ res = get_page_from_l1e(sl1e, d, d, disallow_mask);
// If a privileged domain is attempting to install a map of a page it does
// not own, we let it succeed anyway.
@@ -877,7 +885,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl1e, struct domain *d, p2m_type_t type)
{
res = xsm_priv_mapping(XSM_TARGET, d, owner);
if ( !res ) {
- res = get_page_from_l1e(sl1e, d, owner);
+ res = get_page_from_l1e(sl1e, d, owner, disallow_mask);
SHADOW_PRINTK("privileged domain %d installs map of mfn %"PRI_mfn"
"
"which is owned by d%d: %s\n",
d->domain_id, mfn_x(mfn), owner->domain_id,
@@ -896,7 +904,8 @@ shadow_get_page_from_l1e(shadow_l1e_t sl1e, struct domain *d, p2m_type_t type)
we can just grab a reference directly. */
mfn = shadow_l1e_get_mfn(sl1e);
if ( mfn_valid(mfn) )
- res = get_page_from_l1e(sl1e, d, page_get_owner(mfn_to_page(mfn)));
+ res = get_page_from_l1e(sl1e, d, page_get_owner(mfn_to_page(mfn)),
+ disallow_mask);
}
if ( unlikely(res < 0) )
diff --git a/xen/arch/x86/pv/ro-page-fault.c b/xen/arch/x86/pv/ro-page-fault.c
index 7e0e7e8dfc..04b4e455f5 100644
--- a/xen/arch/x86/pv/ro-page-fault.c
+++ b/xen/arch/x86/pv/ro-page-fault.c
@@ -127,7 +127,7 @@ static int ptwr_emulated_update(unsigned long addr, paddr_t old, paddr_t val,
/* Check the new PTE. */
nl1e = l1e_from_intpte(val);
- switch ( ret = get_page_from_l1e(nl1e, d, d) )
+ switch ( ret = get_page_from_l1e(nl1e, d, d, l1_disallow_mask(d)) )
{
default:
if ( is_pv_32bit_domain(d) && (bytes == 4) && (unaligned_addr & 4) &&
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 741c98575e..dca1831382 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -376,7 +376,8 @@ int put_page_type_preemptible(struct page_info *page);
int get_page_type_preemptible(struct page_info *page, unsigned long type);
int put_old_guest_table(struct vcpu *);
int get_page_from_l1e(
- l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner);
+ l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner,
+ uint32_t disallow_mask);
void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);
static inline struct page_info *get_page_from_mfn(mfn_t mfn, struct domain *d)
--
2.11.0