[Xen-devel] [PATCH 05/17] xen/x86: Remove the non-typesafe version of pagetable_* helpers
From: Julien Grall <jgrall@xxxxxxxxxx>
Most of the users of the pagetable_* helpers can use the typesafe
version. Therefore, it is time to convert the callers still using the
non-typesafe version over to the typesafe one.

Some parts of the code assume that a pagetable is NULL when the MFN is
0. Where possible, this is replaced with the helper pagetable_is_null().

There are still some places which test against MFN 0, and it is not
clear whether other unconverted parts of the code rely on that value.
So, for now, the NULL value is not changed to INVALID_MFN.

No functional changes intended.
Signed-off-by: Julien Grall <jgrall@xxxxxxxxxx>
---
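[Note for reviewers, not part of the commit message: the hunks below
lean on the existing typesafe MFN wrappers. As a rough sketch of the
pattern -- simplified, since the real definitions are generated by
Xen's TYPE_SAFE() machinery and differ in detail -- it looks like:

    #include <stdbool.h>   /* for bool when building outside the Xen tree */

    /*
     * Wrapping the integer in a struct makes accidental PFN/MFN mixes
     * a compile error; conversions must go through _mfn()/mfn_x()
     * explicitly.
     */
    typedef struct { unsigned long val; } mfn_t;

    static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
    static inline unsigned long mfn_x(mfn_t m) { return m.val; }
    static inline bool mfn_eq(mfn_t x, mfn_t y)
    { return mfn_x(x) == mfn_x(y); }
]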
xen/arch/x86/domain.c | 18 ++++++++-------
xen/arch/x86/domctl.c | 6 ++---
xen/arch/x86/hvm/vmx/vmcs.c | 2 +-
xen/arch/x86/hvm/vmx/vmx.c | 2 +-
xen/arch/x86/hvm/vmx/vvmx.c | 2 +-
xen/arch/x86/mm.c | 40 +++++++++++++++++-----------------
xen/arch/x86/mm/hap/hap.c | 2 +-
xen/arch/x86/mm/p2m-ept.c | 2 +-
xen/arch/x86/mm/p2m-pt.c | 4 ++--
xen/arch/x86/mm/p2m.c | 2 +-
xen/arch/x86/mm/shadow/multi.c | 24 ++++++++++----------
xen/arch/x86/pv/dom0_build.c | 10 ++++-----
xen/arch/x86/traps.c | 6 ++---
xen/include/asm-x86/page.h | 19 ++++++++--------
14 files changed, 70 insertions(+), 69 deletions(-)
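[Most hunks repeat one mechanical transformation. Roughly, as an
illustrative composite rather than a copy of any single hunk below:

    /* Before: raw unsigned long, with 0 doubling as the NULL sentinel. */
    unsigned long pfn = pagetable_get_pfn(v->arch.guest_table);
    if ( pfn != 0 )
        page = mfn_to_page(_mfn(pfn));

    /* After: mfn_t end to end; the NULL sentinel remains MFN 0 for now. */
    mfn_t mfn = pagetable_get_mfn(v->arch.guest_table);
    if ( !mfn_eq(mfn, _mfn(0)) )
        page = mfn_to_page(mfn);
]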
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 15750ce210..18d8fda9bd 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -952,25 +952,27 @@ int arch_set_info_guest(
}
else
{
- unsigned long pfn = pagetable_get_pfn(v->arch.guest_table);
+ mfn_t mfn = pagetable_get_mfn(v->arch.guest_table);
bool fail;
if ( !compat )
{
- fail = xen_pfn_to_cr3(pfn) != c.nat->ctrlreg[3];
+ fail = mfn_to_cr3(mfn) != c.nat->ctrlreg[3];
if ( pagetable_is_null(v->arch.guest_table_user) )
fail |= c.nat->ctrlreg[1] || !(flags & VGCF_in_kernel);
else
{
- pfn = pagetable_get_pfn(v->arch.guest_table_user);
- fail |= xen_pfn_to_cr3(pfn) != c.nat->ctrlreg[1];
+ mfn = pagetable_get_mfn(v->arch.guest_table_user);
+ fail |= mfn_to_cr3(mfn) != c.nat->ctrlreg[1];
}
- } else {
- l4_pgentry_t *l4tab = map_domain_page(_mfn(pfn));
+ }
+ else
+ {
+ l4_pgentry_t *l4tab = map_domain_page(mfn);
- pfn = l4e_get_pfn(*l4tab);
+ mfn = l4e_get_mfn(*l4tab);
unmap_domain_page(l4tab);
- fail = compat_pfn_to_cr3(pfn) != c.cmp->ctrlreg[3];
+ fail = compat_pfn_to_cr3(mfn_x(mfn)) != c.cmp->ctrlreg[3];
}
for ( i = 0; i < ARRAY_SIZE(v->arch.pv.gdt_frames); ++i )
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index ed86762fa6..02596c3810 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1611,11 +1611,11 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
if ( !compat )
{
- c.nat->ctrlreg[3] = xen_pfn_to_cr3(
- pagetable_get_pfn(v->arch.guest_table));
+ c.nat->ctrlreg[3] = mfn_to_cr3(
+ pagetable_get_mfn(v->arch.guest_table));
c.nat->ctrlreg[1] =
pagetable_is_null(v->arch.guest_table_user) ? 0
- : xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table_user));
+ : mfn_to_cr3(pagetable_get_mfn(v->arch.guest_table_user));
}
else
{
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 4c23645454..1f39367253 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1290,7 +1290,7 @@ static int construct_vmcs(struct vcpu *v)
struct p2m_domain *p2m = p2m_get_hostp2m(d);
struct ept_data *ept = &p2m->ept;
- ept->mfn = pagetable_get_pfn(p2m_get_pagetable(p2m));
+ ept->mfn = mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)));
__vmwrite(EPT_POINTER, ept->eptp);
__vmwrite(HOST_PAT, XEN_MSR_PAT);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d265ed46ad..a1e3a19c0a 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2110,7 +2110,7 @@ static void vmx_vcpu_update_eptp(struct vcpu *v)
p2m = p2m_get_hostp2m(d);
ept = &p2m->ept;
- ept->mfn = pagetable_get_pfn(p2m_get_pagetable(p2m));
+ ept->mfn = mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)));
vmx_vmcs_enter(v);
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index f049920196..84b47ef277 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1149,7 +1149,7 @@ static uint64_t get_shadow_eptp(struct vcpu *v)
struct p2m_domain *p2m = p2m_get_nestedp2m(v);
struct ept_data *ept = &p2m->ept;
- ept->mfn = pagetable_get_pfn(p2m_get_pagetable(p2m));
+ ept->mfn = mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)));
return ept->eptp;
}
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 7c0f81759a..aa0bf3d0ee 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3085,7 +3085,7 @@ int put_old_guest_table(struct vcpu *v)
int vcpu_destroy_pagetables(struct vcpu *v)
{
- unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
+ mfn_t mfn = pagetable_get_mfn(v->arch.guest_table);
struct page_info *page = NULL;
int rc = put_old_guest_table(v);
bool put_guest_table_user = false;
@@ -3102,9 +3102,9 @@ int vcpu_destroy_pagetables(struct vcpu *v)
*/
if ( is_pv_32bit_vcpu(v) )
{
- l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
+ l4_pgentry_t *l4tab = map_domain_page(mfn);
- mfn = l4e_get_pfn(*l4tab);
+ mfn = l4e_get_mfn(*l4tab);
l4e_write(l4tab, l4e_empty());
unmap_domain_page(l4tab);
}
@@ -3116,24 +3116,24 @@ int vcpu_destroy_pagetables(struct vcpu *v)
/* Free that page if non-zero */
do {
- if ( mfn )
+ if ( !mfn_eq(mfn, _mfn(0)) )
{
- page = mfn_to_page(_mfn(mfn));
+ page = mfn_to_page(mfn);
if ( paging_mode_refcounts(v->domain) )
put_page(page);
else
rc = put_page_and_type_preemptible(page);
- mfn = 0;
+ mfn = _mfn(0);
}
if ( !rc && put_guest_table_user )
{
/* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */
- mfn = pagetable_get_pfn(v->arch.guest_table_user);
+ mfn = pagetable_get_mfn(v->arch.guest_table_user);
v->arch.guest_table_user = pagetable_null();
put_guest_table_user = false;
}
- } while ( mfn );
+ } while ( !mfn_eq(mfn, _mfn(0)) );
/*
* If a "put" operation was interrupted, finish things off in
@@ -3551,7 +3551,8 @@ long do_mmuext_op(
break;
case MMUEXT_NEW_USER_BASEPTR: {
- unsigned long old_mfn;
+ mfn_t old_mfn;
+ mfn_t new_mfn = _mfn(op.arg1.mfn);
if ( unlikely(currd != pg_owner) )
rc = -EPERM;
@@ -3560,19 +3561,18 @@ long do_mmuext_op(
if ( unlikely(rc) )
break;
- old_mfn = pagetable_get_pfn(curr->arch.guest_table_user);
+ old_mfn = pagetable_get_mfn(curr->arch.guest_table_user);
/*
* This is particularly important when getting restarted after the
* previous attempt got preempted in the put-old-MFN phase.
*/
- if ( old_mfn == op.arg1.mfn )
+ if ( mfn_eq(old_mfn, new_mfn) )
break;
- if ( op.arg1.mfn != 0 )
+ if ( !mfn_eq(new_mfn, _mfn(0)) )
{
- rc = get_page_and_type_from_mfn(
- _mfn(op.arg1.mfn), PGT_root_page_table, currd, PTF_preemptible);
-
+ rc = get_page_and_type_from_mfn(new_mfn, PGT_root_page_table,
+ currd, PTF_preemptible);
if ( unlikely(rc) )
{
if ( rc == -EINTR )
@@ -3580,19 +3580,19 @@ long do_mmuext_op(
else if ( rc != -ERESTART )
gdprintk(XENLOG_WARNING,
"Error %d installing new mfn %" PRI_mfn "\n",
- rc, op.arg1.mfn);
+ rc, mfn_x(new_mfn));
break;
}
if ( VM_ASSIST(currd, m2p_strict) )
- zap_ro_mpt(_mfn(op.arg1.mfn));
+ zap_ro_mpt(new_mfn);
}
- curr->arch.guest_table_user = pagetable_from_pfn(op.arg1.mfn);
+ curr->arch.guest_table_user = pagetable_from_mfn(new_mfn);
- if ( old_mfn != 0 )
+ if ( !mfn_eq(old_mfn, _mfn(0)) )
{
- page = mfn_to_page(_mfn(old_mfn));
+ page = mfn_to_page(old_mfn);
switch ( rc = put_page_and_type_preemptible(page) )
{
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index a6d5e39b02..051e92169a 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -394,7 +394,7 @@ static mfn_t hap_make_monitor_table(struct vcpu *v)
l4_pgentry_t *l4e;
mfn_t m4mfn;
- ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
+ ASSERT(pagetable_is_null(v->arch.monitor_table));
if ( (pg = hap_alloc(d)) == NULL )
goto oom;
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index eb0f0edfef..346696e469 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1366,7 +1366,7 @@ void p2m_init_altp2m_ept(struct domain *d, unsigned int i)
p2m->ept.ad = hostp2m->ept.ad;
ept = &p2m->ept;
- ept->mfn = pagetable_get_pfn(p2m_get_pagetable(p2m));
+ ept->mfn = mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)));
d->arch.altp2m_eptp[array_index_nospec(i, MAX_EPTP)] = ept->eptp;
}
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index eb66077496..cccb06c26e 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -867,7 +867,7 @@ static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m,
unsigned long gfn = 0;
unsigned int i, changed;
- if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) == 0 )
+ if ( pagetable_is_null(p2m_get_pagetable(p2m)) )
return;
ASSERT(hap_enabled(p2m->domain));
@@ -950,7 +950,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
ASSERT(pod_locked_by_me(p2m));
/* Audit part one: walk the domain's p2m table, checking the entries. */
- if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
+ if ( !pagetable_is_null(p2m_get_pagetable(p2m)) )
{
l2_pgentry_t *l2e;
l1_pgentry_t *l1e;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 9f51370327..45b4b784d3 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -702,7 +702,7 @@ int p2m_alloc_table(struct p2m_domain *p2m)
return -EINVAL;
}
- if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
+ if ( !pagetable_is_null(p2m_get_pagetable(p2m)) )
{
P2M_ERROR("p2m already allocated for this domain\n");
p2m_unlock(p2m);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index b6afc0fba4..5751dae344 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1520,7 +1520,7 @@ sh_make_monitor_table(struct vcpu *v)
{
struct domain *d = v->domain;
- ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
+ ASSERT(pagetable_is_null(v->arch.monitor_table));
/* Guarantee we can get the memory we need */
shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS);
@@ -2351,11 +2351,11 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
ASSERT(mfn_valid(smfn));
#endif
- if ( pagetable_get_pfn(v->arch.shadow_table[0]) == mfn_x(smfn)
+ if ( mfn_eq(pagetable_get_mfn(v->arch.shadow_table[0]), smfn)
#if (SHADOW_PAGING_LEVELS == 3)
- || pagetable_get_pfn(v->arch.shadow_table[1]) == mfn_x(smfn)
- || pagetable_get_pfn(v->arch.shadow_table[2]) == mfn_x(smfn)
- || pagetable_get_pfn(v->arch.shadow_table[3]) == mfn_x(smfn)
+ || mfn_eq(pagetable_get_mfn(v->arch.shadow_table[1]), smfn)
+ || mfn_eq(pagetable_get_mfn(v->arch.shadow_table[2]), smfn)
+ || mfn_eq(pagetable_get_mfn(v->arch.shadow_table[3]), smfn)
#endif
)
return 0;
@@ -3707,7 +3707,7 @@ sh_update_linear_entries(struct vcpu *v)
/* Don't try to update the monitor table if it doesn't exist */
if ( shadow_mode_external(d)
- && pagetable_get_pfn(v->arch.monitor_table) == 0 )
+ && pagetable_is_null(v->arch.monitor_table) )
return;
#if SHADOW_PAGING_LEVELS == 4
@@ -3722,7 +3722,7 @@ sh_update_linear_entries(struct vcpu *v)
if ( v == current )
{
__linear_l4_table[l4_linear_offset(SH_LINEAR_PT_VIRT_START)] =
- l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
+ l4e_from_mfn(pagetable_get_mfn(v->arch.shadow_table[0]),
__PAGE_HYPERVISOR_RW);
}
else
@@ -3730,7 +3730,7 @@ sh_update_linear_entries(struct vcpu *v)
l4_pgentry_t *ml4e;
ml4e = map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
ml4e[l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
- l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
+ l4e_from_mfn(pagetable_get_mfn(v->arch.shadow_table[0]),
__PAGE_HYPERVISOR_RW);
unmap_domain_page(ml4e);
}
@@ -3964,15 +3964,15 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
{
ASSERT(shadow_mode_external(d));
if ( hvm_paging_enabled(v) )
- ASSERT(pagetable_get_pfn(v->arch.guest_table));
+ ASSERT(!pagetable_is_null(v->arch.guest_table));
else
- ASSERT(v->arch.guest_table.pfn
- == d->arch.paging.shadow.unpaged_pagetable.pfn);
+ ASSERT(mfn_eq(pagetable_get_mfn(v->arch.guest_table),
+ pagetable_get_mfn(d->arch.paging.shadow.unpaged_pagetable)));
}
#endif
SHADOW_PRINTK("%pv guest_table=%"PRI_mfn"\n",
- v, (unsigned long)pagetable_get_pfn(v->arch.guest_table));
+ v, mfn_x(pagetable_get_mfn(v->arch.guest_table)));
#if GUEST_PAGING_LEVELS == 4
if ( !(v->arch.flags & TF_kernel_mode) )
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index 30846b5f97..8abd5d255c 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -93,14 +93,14 @@ static __init void mark_pv_pt_pages_rdonly(struct domain *d,
}
}
-static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
+static __init void setup_pv_physmap(struct domain *d, mfn_t pgtbl_mfn,
unsigned long v_start, unsigned long v_end,
unsigned long vphysmap_start,
unsigned long vphysmap_end,
unsigned long nr_pages)
{
struct page_info *page = NULL;
- l4_pgentry_t *pl4e, *l4start = map_domain_page(_mfn(pgtbl_pfn));
+ l4_pgentry_t *pl4e, *l4start = map_domain_page(pgtbl_mfn);
l3_pgentry_t *pl3e = NULL;
l2_pgentry_t *pl2e = NULL;
l1_pgentry_t *pl1e = NULL;
@@ -760,11 +760,9 @@ int __init dom0_construct_pv(struct domain *d,
/* Set up the phys->machine table if not part of the initial mapping. */
if ( parms.p2m_base != UNSET_ADDR )
- {
- pfn = pagetable_get_pfn(v->arch.guest_table);
- setup_pv_physmap(d, pfn, v_start, v_end, vphysmap_start, vphysmap_end,
+ setup_pv_physmap(d, pagetable_get_mfn(v->arch.guest_table),
+ v_start, v_end, vphysmap_start, vphysmap_end,
nr_pages);
- }
/* Write the phys->machine and machine->phys table entries. */
for ( pfn = 0; pfn < count; pfn++ )
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 4aa7c35be4..04a3ebc0a2 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -247,12 +247,12 @@ static void compat_show_guest_stack(struct vcpu *v,
if ( v != current )
{
struct vcpu *vcpu;
- unsigned long mfn;
+ mfn_t mfn;
ASSERT(guest_kernel_mode(v, regs));
- mfn = read_cr3() >> PAGE_SHIFT;
+ mfn = cr3_to_mfn(read_cr3());
for_each_vcpu( v->domain, vcpu )
- if ( pagetable_get_pfn(vcpu->arch.guest_table) == mfn )
+ if ( mfn_eq(pagetable_get_mfn(vcpu->arch.guest_table), mfn) )
break;
if ( !vcpu )
{
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 624dbbb949..377ba14f6e 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -18,6 +18,7 @@
#ifndef __ASSEMBLY__
# include <asm/types.h>
# include <xen/lib.h>
+# include <xen/mm_types.h>
#endif
#include <asm/x86_64/page.h>
@@ -213,17 +214,17 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
#ifndef __ASSEMBLY__
/* Page-table type. */
-typedef struct { u64 pfn; } pagetable_t;
-#define pagetable_get_paddr(x) ((paddr_t)(x).pfn << PAGE_SHIFT)
+typedef struct { mfn_t mfn; } pagetable_t;
+#define PAGETABLE_NULL_MFN _mfn(0)
+
+#define pagetable_get_paddr(x) mfn_to_maddr((x).mfn)
#define pagetable_get_page(x) mfn_to_page(pagetable_get_mfn(x))
-#define pagetable_get_pfn(x) ((x).pfn)
-#define pagetable_get_mfn(x) _mfn(((x).pfn))
-#define pagetable_is_null(x) ((x).pfn == 0)
-#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
-#define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) })
+#define pagetable_get_mfn(x) ((x).mfn)
+#define pagetable_is_null(x) mfn_eq((x).mfn, PAGETABLE_NULL_MFN)
+#define pagetable_from_mfn(mfn) ((pagetable_t) { mfn })
#define pagetable_from_page(pg) pagetable_from_mfn(page_to_mfn(pg))
-#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
-#define pagetable_null() pagetable_from_pfn(0)
+#define pagetable_from_paddr(p) pagetable_from_mfn(maddr_to_mfn(p))
+#define pagetable_null() pagetable_from_mfn(PAGETABLE_NULL_MFN)
void clear_page_sse2(void *);
void copy_page_sse2(void *, const void *);
--
2.17.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel