[XEN][PATCH] xen/x86: move d->arch.physaddr_bitsize field handling to pv32
From: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
The d->arch.physaddr_bitsize field is used only by PV32 code, so:
- move the domain_set_alloc_bitsize() function into PV32 code and drop the
  now-unused domain_set_alloc_bitsize() defines from the other architectures
- move the domain_clamp_alloc_bitsize() function into PV32 code, rename it to
  _domain_clamp_alloc_bitsize(), introduce a generic
  domain_clamp_alloc_bitsize() fallback define, and drop the per-arch
  domain_clamp_alloc_bitsize() defines from the !x86 architectures
- move the d->arch.physaddr_bitsize field under the CONFIG_PV32 ifdef
Signed-off-by: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
---
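Illustration only, not part of the patch: the standalone sketch below mirrors,
with hypothetical values, how the bit width derived in
domain_set_alloc_bitsize() ends up clamping a caller-supplied allocation
width, as _domain_clamp_alloc_bitsize() now does on the PV32 path. The entry
count and requested width are made-up example numbers; the real code derives
the count from MACH2PHYS_COMPAT_NR_ENTRIES(d).

/* Standalone sketch of the clamp logic, hypothetical values only. */
#include <stdio.h>

#define PAGE_SHIFT 12

/* Position of the highest set bit, 1-based (simplified fls()). */
static unsigned int fls_sketch(unsigned long x)
{
    unsigned int r = 0;

    while ( x )
    {
        x >>= 1;
        r++;
    }

    return r;
}

int main(void)
{
    /* Assume the guest's compat M2P space holds 2^20 entries (pages). */
    unsigned long m2p_entries = 1UL << 20;
    /* 2^n pages -> 2^(n + PAGE_SHIFT) bytes of addressable memory. */
    unsigned int physaddr_bitsize = fls_sketch(m2p_entries) - 1 + PAGE_SHIFT;
    unsigned int requested_bits = 36; /* e.g. a 36-bit-capable allocation request */
    unsigned int clamped = requested_bits < physaddr_bitsize ? requested_bits
                                                             : physaddr_bitsize;

    printf("physaddr_bitsize=%u, request=%u -> clamped to %u bits\n",
           physaddr_bitsize, requested_bits, clamped);

    return 0;
}

A caller passing a NULL domain or a domain with physaddr_bitsize == 0 keeps
its requested width unchanged, which is why the !PV32 fallback define in
xen/include/xen/mm.h can simply hand back the passed-in value.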
xen/arch/arm/include/asm/mm.h | 3 ---
xen/arch/ppc/include/asm/mm.h | 3 ---
xen/arch/riscv/include/asm/mm.h | 3 ---
xen/arch/x86/include/asm/domain.h | 2 +-
xen/arch/x86/include/asm/mm.h | 10 +++++++---
xen/arch/x86/pv/dom0_build.c | 2 ++
xen/arch/x86/pv/domain.c | 21 +++++++++++++++++++++
xen/arch/x86/x86_64/mm.c | 20 --------------------
xen/include/xen/mm.h | 4 ++++
9 files changed, 35 insertions(+), 33 deletions(-)
diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index f702f4a0d676..ec2d2dc5372a 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -313,9 +313,6 @@ struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va,
/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);
-#define domain_set_alloc_bitsize(d) ((void)0)
-#define domain_clamp_alloc_bitsize(d, b) (b)
-
unsigned long domain_get_maximum_gpfn(struct domain *d);
/* Release all __init and __initdata ranges to be reused */
diff --git a/xen/arch/ppc/include/asm/mm.h b/xen/arch/ppc/include/asm/mm.h
index a33eeec43bd6..91c405876bd0 100644
--- a/xen/arch/ppc/include/asm/mm.h
+++ b/xen/arch/ppc/include/asm/mm.h
@@ -96,9 +96,6 @@ static inline struct page_info *virt_to_page(const void *v)
/* TODO: implement */
#define mfn_valid(mfn) ({ (void) (mfn); 0; })
-#define domain_set_alloc_bitsize(d) ((void)(d))
-#define domain_clamp_alloc_bitsize(d, b) (b)
-
#define PFN_ORDER(pfn_) ((pfn_)->v.free.order)
struct page_info
diff --git a/xen/arch/riscv/include/asm/mm.h b/xen/arch/riscv/include/asm/mm.h
index 9283616c0224..e5ea91fa4d0c 100644
--- a/xen/arch/riscv/include/asm/mm.h
+++ b/xen/arch/riscv/include/asm/mm.h
@@ -292,9 +292,6 @@ static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
/* TODO: implement */
#define mfn_valid(mfn) ({ (void)(mfn); 0; })
-#define domain_set_alloc_bitsize(d) ((void)(d))
-#define domain_clamp_alloc_bitsize(d, b) ((void)(d), (b))
-
#define PFN_ORDER(pg) ((pg)->v.free.order)
extern unsigned char cpu0_boot_stack[];
diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
index 5df8c7825333..fca721ac482a 100644
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -314,10 +314,10 @@ struct arch_domain
#ifdef CONFIG_PV32
unsigned int hv_compat_vstart;
-#endif
/* Maximum physical-address bitwidth supported by this guest. */
unsigned int physaddr_bitsize;
+#endif
/* I/O-port admin-specified access capabilities. */
struct rangeset *ioport_caps;
diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
index 08153e6d6fa2..8c10458f52c4 100644
--- a/xen/arch/x86/include/asm/mm.h
+++ b/xen/arch/x86/include/asm/mm.h
@@ -619,9 +619,6 @@ void __iomem *ioremap_wc(paddr_t pa, size_t len);
extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm);
-void domain_set_alloc_bitsize(struct domain *d);
-unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits);
-
unsigned long domain_get_maximum_gpfn(struct domain *d);
/* Definition of an mm lock: spinlock with extra fields for debugging */
@@ -659,4 +656,11 @@ static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
return (mfn + nr) <= (virt_to_mfn(eva - 1) + 1);
}
+#ifdef CONFIG_PV32
+unsigned int _domain_clamp_alloc_bitsize(const struct domain *d,
+ unsigned int bits);
+#define domain_clamp_alloc_bitsize(d, bits) \
+ _domain_clamp_alloc_bitsize((d), (bits))
+#endif
+
#endif /* __ASM_X86_MM_H__ */
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index 21158ce1812e..6748c639cdc1 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -626,6 +626,7 @@ static int __init dom0_construct(const struct boot_domain *bd)
initrd_mfn = paddr_to_pfn(initrd->start);
mfn = initrd_mfn;
count = PFN_UP(initrd_len);
+#ifdef CONFIG_PV32
if ( d->arch.physaddr_bitsize &&
((mfn + count - 1) >> (d->arch.physaddr_bitsize - PAGE_SHIFT)) )
{
@@ -650,6 +651,7 @@ static int __init dom0_construct(const struct boot_domain *bd)
initrd->start = pfn_to_paddr(initrd_mfn);
}
else
+#endif
{
while ( count-- )
if ( assign_pages(mfn_to_page(_mfn(mfn++)), 1, d, 0) )
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 9c4785c187dd..1cdcb9f89c54 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -230,6 +230,27 @@ unsigned long pv_make_cr4(const struct vcpu *v)
}
#ifdef CONFIG_PV32
+unsigned int _domain_clamp_alloc_bitsize(const struct domain *d,
+ unsigned int bits)
+{
+ if ( (d == NULL) || (d->arch.physaddr_bitsize == 0) )
+ return bits;
+ return min(d->arch.physaddr_bitsize, bits);
+}
+
+static void domain_set_alloc_bitsize(struct domain *d)
+{
+ if ( !is_pv_32bit_domain(d) ||
+ (MACH2PHYS_COMPAT_NR_ENTRIES(d) >= max_page) ||
+ d->arch.physaddr_bitsize > 0 )
+ return;
+ d->arch.physaddr_bitsize =
+ /* 2^n entries can be contained in guest's p2m mapping space */
+ fls(MACH2PHYS_COMPAT_NR_ENTRIES(d)) - 1
+ /* 2^n pages -> 2^(n+PAGE_SHIFT) bits */
+ + PAGE_SHIFT;
+}
+
int switch_compat(struct domain *d)
{
struct vcpu *v;
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index d4e6a9c0a2e0..8eadab7933d0 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1119,26 +1119,6 @@ unmap:
return ret;
}
-void domain_set_alloc_bitsize(struct domain *d)
-{
- if ( !is_pv_32bit_domain(d) ||
- (MACH2PHYS_COMPAT_NR_ENTRIES(d) >= max_page) ||
- d->arch.physaddr_bitsize > 0 )
- return;
- d->arch.physaddr_bitsize =
- /* 2^n entries can be contained in guest's p2m mapping space */
- fls(MACH2PHYS_COMPAT_NR_ENTRIES(d)) - 1
- /* 2^n pages -> 2^(n+PAGE_SHIFT) bits */
- + PAGE_SHIFT;
-}
-
-unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits)
-{
- if ( (d == NULL) || (d->arch.physaddr_bitsize == 0) )
- return bits;
- return min(d->arch.physaddr_bitsize, bits);
-}
-
static int transfer_pages_to_heap(struct mem_hotadd_info *info)
{
unsigned long i;
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index b968f47b87e0..6a66fc7a05c8 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -641,4 +641,8 @@ static inline void put_page_alloc_ref(struct page_info *page)
}
}
+#ifndef domain_clamp_alloc_bitsize
+#define domain_clamp_alloc_bitsize(d, b) ((void)(d), (b))
+#endif
+
#endif /* __XEN_MM_H__ */
--
2.34.1