|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH RFC 01/12] x86/paging: introduce paging_set_allocation
... and remove hap_set_alloc_for_pvh_dom0.
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
---
xen/arch/x86/domain_build.c | 7 +++----
xen/arch/x86/mm/hap/hap.c | 14 +-------------
xen/arch/x86/mm/paging.c | 16 ++++++++++++++++
xen/arch/x86/mm/shadow/common.c | 5 ++---
xen/include/asm-x86/hap.h | 3 ++-
xen/include/asm-x86/paging.h | 3 +++
xen/include/asm-x86/shadow.h | 6 ++++++
7 files changed, 33 insertions(+), 21 deletions(-)
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 0a02d65..d7d4afc 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -35,7 +35,6 @@
#include <asm/setup.h>
#include <asm/bzimage.h> /* for bzimage_parse */
#include <asm/io_apic.h>
-#include <asm/hap.h>
#include <asm/hpet.h>
#include <public/version.h>
@@ -1383,15 +1382,15 @@ int __init construct_dom0(
nr_pages);
}
- if ( is_pvh_domain(d) )
- hap_set_alloc_for_pvh_dom0(d, dom0_paging_pages(d, nr_pages));
-
/*
* We enable paging mode again so guest_physmap_add_page will do the
* right thing for us.
*/
d->arch.paging.mode = save_pvh_pg_mode;
+ if ( is_pvh_domain(d) )
+ paging_set_allocation(d, dom0_paging_pages(d, nr_pages));
+
/* Write the phys->machine and machine->phys table entries. */
for ( pfn = 0; pfn < count; pfn++ )
{
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 3218fa2..b49b38f 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -334,7 +334,7 @@ hap_get_allocation(struct domain *d)
/* Set the pool of pages to the required number of pages.
* Returns 0 for success, non-zero for failure. */
-static unsigned int
+unsigned int
hap_set_allocation(struct domain *d, unsigned int pages, int *preempted)
{
struct page_info *pg;
@@ -638,18 +638,6 @@ int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
}
}
-void __init hap_set_alloc_for_pvh_dom0(struct domain *d,
- unsigned long hap_pages)
-{
- int rc;
-
- paging_lock(d);
- rc = hap_set_allocation(d, hap_pages, NULL);
- paging_unlock(d);
-
- BUG_ON(rc);
-}
-
static const struct paging_mode hap_paging_real_mode;
static const struct paging_mode hap_paging_protected_mode;
static const struct paging_mode hap_paging_pae_mode;
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 107fc8c..1b270df 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -953,6 +953,22 @@ void paging_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
safe_write_pte(p, new);
}
+int paging_set_allocation(struct domain *d, unsigned long pages)
+{
+ int rc;
+
+ ASSERT(paging_mode_enabled(d));
+
+ paging_lock(d);
+ if ( hap_enabled(d) )
+ rc = hap_set_allocation(d, pages, NULL);
+ else
+ rc = sh_set_allocation(d, pages, NULL);
+ paging_unlock(d);
+
+ return rc;
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index c22362f..452e22e 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1604,9 +1604,8 @@ shadow_free_p2m_page(struct domain *d, struct page_info *pg)
* Input will be rounded up to at least shadow_min_acceptable_pages(),
* plus space for the p2m table.
* Returns 0 for success, non-zero for failure. */
-static unsigned int sh_set_allocation(struct domain *d,
- unsigned int pages,
- int *preempted)
+unsigned int sh_set_allocation(struct domain *d, unsigned int pages,
+ int *preempted)
{
struct page_info *sp;
unsigned int lower_bound;
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index c613836..e3c9c98 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -46,7 +46,8 @@ int hap_track_dirty_vram(struct domain *d,
XEN_GUEST_HANDLE_64(uint8) dirty_bitmap);
extern const struct paging_mode *hap_paging_get_mode(struct vcpu *);
-void hap_set_alloc_for_pvh_dom0(struct domain *d, unsigned long num_pages);
+unsigned int hap_set_allocation(struct domain *d, unsigned int pages,
+ int *preempted);
#endif /* XEN_HAP_H */
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index a1401ab..6598007 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -347,6 +347,9 @@ void pagetable_dying(struct domain *d, paddr_t gpa);
void paging_dump_domain_info(struct domain *d);
void paging_dump_vcpu_info(struct vcpu *v);
+/* Set pool of pages for paging. */
+int paging_set_allocation(struct domain *d, unsigned long pages);
+
#endif /* XEN_PAGING_H */
/*
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 6d0aefb..bc068ec 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -83,6 +83,10 @@ void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all);
/* Discard _all_ mappings from the domain's shadows. */
void shadow_blow_tables_per_domain(struct domain *d);
+/* Set the pool of shadow pages to the required number of pages. */
+unsigned int sh_set_allocation(struct domain *d, unsigned int pages,
+ int *preempted);
+
#else /* !CONFIG_SHADOW_PAGING */
#define shadow_teardown(d, p) ASSERT(is_pv_domain(d))
@@ -91,6 +95,8 @@ void shadow_blow_tables_per_domain(struct domain *d);
({ ASSERT(is_pv_domain(d)); -EOPNOTSUPP; })
#define shadow_track_dirty_vram(d, begin_pfn, nr, bitmap) \
({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
+#define sh_set_allocation(d, pages, preempted) \
+ ({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
bool_t fast, bool_t all) {}
--
2.7.4 (Apple Git-66)
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.