[Xen-devel] [PATCH v2] x86/shadow: Correct guest behaviour when creating PTEs above maxphysaddr
XSA-173 (c/s 8b1764833) introduces gfn_bits, an upper limit on guest GFNs
which might be lower than the real maxphysaddr, to avoid overflowing the
superpage shadow backpointer.
However, plenty of hardware has a physical address width less than 44 bits,
and the code added in shadow_domain_init() is a straight assignment. This
causes gfn_bits to be increased beyond the physical address width on most
Intel consumer hardware (typically a width of 39, which is the number reported
to the guest via CPUID).
If the guest intentionally creates a PTE referencing a physical address
between 39 and 44 bits, the result should be #PF[RSVD] when the virtual
address is used. However, the shadow code accepts the PTE, shadows it, and the
virtual address works normally.
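As an illustration (not part of the patch), a minimal sketch of the
reserved address-bit semantics the guest expects; the helper name and the
standalone scaffolding are hypothetical, but the mask covers the
architectural address bits 12-51 of a 4K leaf PTE:

    #include <stdbool.h>
    #include <stdint.h>

    /* Bits 12-51 of a 64-bit PTE hold the physical address. */
    #define PTE_ADDR_MASK 0x000ffffffffff000ULL

    /*
     * True if the PTE references a physical address at or above
     * 2^maxphysaddr, i.e. sets address bits which real hardware treats
     * as reserved and reports via #PF[RSVD].
     */
    static bool pte_addr_is_reserved(uint64_t pte, unsigned int maxphysaddr)
    {
        return (pte & PTE_ADDR_MASK) >> maxphysaddr;
    }

With maxphysaddr == 39, a PTE setting any of address bits 39-43 takes this
path on real hardware, which is exactly the case the shadow code was
letting through.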
Introduce paging_max_paddr_bits() to calculate the largest guest physical
address supportable by the paging infrastructure, and update
recalculate_cpuid_policy() to take this into account when clamping the guest's
cpuid_policy to reality. Remove gfn_bits and rework its users in terms of the
guest's maxphysaddr.
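As a worked example of the resulting clamp (the numbers match the Intel
consumer case above; the standalone scaffolding is hypothetical): a
non-BIGMEM shadow guest is limited to 32-bit GFNs, so
paging_max_paddr_bits() yields min(39, 32 + PAGE_SHIFT) == 39, maxphysaddr
stays at 39, and the out-of-range test in the walkers reduces to a single
shift:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned int maxphysaddr = 39;            /* clamped guest value */
        uint64_t gfn = 1ULL << (39 - PAGE_SHIFT); /* first out-of-range gfn */

        /* The pattern the patched walkers use: any gfn bits at or above
         * (maxphysaddr - PAGE_SHIFT) mean the address is unrepresentable. */
        printf("gfn %#llx out of range: %d\n", (unsigned long long)gfn,
               (gfn >> (maxphysaddr - PAGE_SHIFT)) != 0);
        return 0;
    }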
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
v2:
* Introduce paging_max_paddr_bits() rather than moving paging logic into
recalculate_cpuid_policy().
* Rewrite half of the commit message.
---
xen/arch/x86/cpuid.c | 7 +++----
xen/arch/x86/hvm/vmx/vvmx.c | 2 +-
xen/arch/x86/mm/guest_walk.c | 3 ++-
xen/arch/x86/mm/hap/hap.c | 2 --
xen/arch/x86/mm/p2m.c | 3 ++-
xen/arch/x86/mm/shadow/common.c | 10 ----------
xen/arch/x86/mm/shadow/multi.c | 3 ++-
xen/include/asm-x86/domain.h | 3 ---
xen/include/asm-x86/paging.h | 16 ++++++++++++++++
9 files changed, 26 insertions(+), 23 deletions(-)
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index e0a387e..3378f7a 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -6,6 +6,7 @@
#include <asm/hvm/nestedhvm.h>
#include <asm/hvm/svm/svm.h>
#include <asm/hvm/vmx/vmcs.h>
+#include <asm/paging.h>
#include <asm/processor.h>
#include <asm/xstate.h>
@@ -502,11 +503,9 @@ void recalculate_cpuid_policy(struct domain *d)
cpuid_featureset_to_policy(fs, p);
- p->extd.maxphysaddr = min(p->extd.maxphysaddr, max->extd.maxphysaddr);
p->extd.maxphysaddr = min_t(uint8_t, p->extd.maxphysaddr,
- d->arch.paging.gfn_bits + PAGE_SHIFT);
- p->extd.maxphysaddr = max_t(uint8_t, p->extd.maxphysaddr,
- (p->basic.pae || p->basic.pse36) ? 36 : 32);
+ paging_max_paddr_bits(d));
+ p->extd.maxphysaddr = max_t(uint8_t, p->extd.maxphysaddr, 32);
p->extd.maxlinaddr = p->extd.lm ? 48 : 32;
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 9c61b5b..774a11f 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1381,7 +1381,7 @@ int nvmx_handle_vmxon(struct cpu_user_regs *regs)
}
if ( (gpa & ~PAGE_MASK) ||
- (gpa >> (v->domain->arch.paging.gfn_bits + PAGE_SHIFT)) )
+ (gpa >> v->domain->arch.cpuid->extd.maxphysaddr) )
{
vmfail_invalid(regs);
return X86EMUL_OKAY;
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index a67fd5a..5ad8cf6 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -435,7 +435,8 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
/* If this guest has a restricted physical address space then the
* target GFN must fit within it. */
if ( !(rc & _PAGE_PRESENT)
- && gfn_x(guest_l1e_get_gfn(gw->l1e)) >> d->arch.paging.gfn_bits )
+ && gfn_x(guest_l1e_get_gfn(gw->l1e)) >>
+ (d->arch.cpuid->extd.maxphysaddr - PAGE_SHIFT) )
rc |= _PAGE_INVALID_BITS;
return rc;
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 6dbb3cc..928cd5e 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -447,8 +447,6 @@ void hap_domain_init(struct domain *d)
{
INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
- d->arch.paging.gfn_bits = hap_paddr_bits - PAGE_SHIFT;
-
/* Use HAP logdirty mechanism. */
paging_log_dirty_init(d, hap_enable_log_dirty,
hap_disable_log_dirty,
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index db33153..69c69c7 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1783,7 +1783,8 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
{
struct page_info *page;
- if ( gfn_x(gfn) >> p2m->domain->arch.paging.gfn_bits )
+ if ( gfn_x(gfn) >>
+ (p2m->domain->arch.cpuid->extd.maxphysaddr - PAGE_SHIFT) )
{
*rc = _PAGE_INVALID_BIT;
return NULL;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index a619d65..2235a0a 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -52,16 +52,6 @@ int shadow_domain_init(struct domain *d, unsigned int domcr_flags)
INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist);
INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
- d->arch.paging.gfn_bits = paddr_bits - PAGE_SHIFT;
-#ifndef CONFIG_BIGMEM
- /*
- * Shadowed superpages store GFNs in 32-bit page_info fields.
- * Note that we cannot use guest_supports_superpages() here.
- */
- if ( !is_pv_domain(d) || opt_allow_superpage )
- d->arch.paging.gfn_bits = 32;
-#endif
-
/* Use shadow pagetables for log-dirty support */
paging_log_dirty_init(d, sh_enable_log_dirty,
sh_disable_log_dirty, sh_clean_dirty_bitmap);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index d4090d7..e951daf 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -537,7 +537,8 @@ _sh_propagate(struct vcpu *v,
/* Check there's something for the shadows to map to */
if ( (!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt))
- || gfn_x(target_gfn) >> d->arch.paging.gfn_bits )
+ || gfn_x(target_gfn) >>
+ (d->arch.cpuid->extd.maxphysaddr - PAGE_SHIFT) )
{
*sp = shadow_l1e_empty();
goto done;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index e6c7e13..2270e96 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -195,9 +195,6 @@ struct paging_domain {
/* log dirty support */
struct log_dirty_domain log_dirty;
- /* Number of valid bits in a gfn. */
- unsigned int gfn_bits;
-
/* preemption handling */
struct {
const struct domain *dom;
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index cec6bfd..5c2df8a 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -25,6 +25,7 @@
#define _XEN_PAGING_H
#include <xen/mm.h>
+#include <xen/kconfig.h>
#include <public/domctl.h>
#include <xen/sched.h>
#include <xen/perfc.h>
@@ -360,6 +361,21 @@ void paging_dump_vcpu_info(struct vcpu *v);
int paging_set_allocation(struct domain *d, unsigned int pages,
bool *preempted);
+/* Maxphysaddr supportable by the paging infrastructure. */
+static inline unsigned int paging_max_paddr_bits(const struct domain *d)
+{
+ unsigned int bits = paging_mode_hap(d) ? hap_paddr_bits : paddr_bits;
+
+ if ( !IS_ENABLED(CONFIG_BIGMEM) && paging_mode_shadow(d) &&
+ (!is_pv_domain(d) || opt_allow_superpage) )
+ {
+ /* Shadowed superpages store GFNs in 32-bit page_info fields. */
+ bits = min(bits, 32U + PAGE_SHIFT);
+ }
+
+ return bits;
+}
+
#endif /* XEN_PAGING_H */
/*
--
2.1.4