[Xen-devel] [PATCH RFC v1 62/74] xen/pvshim: memory hotplug
From: Roger Pau Monne <roger.pau@xxxxxxxxxx>

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/pv/shim.c        | 110 ++++++++++++++++++++++++++++++++++++++++++
 xen/common/memory.c           |  14 ++++++
 xen/include/asm-x86/pv/shim.h |  10 ++++
 3 files changed, 134 insertions(+)

diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c
index 29f343b871..eb34467833 100644
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -90,6 +90,9 @@ static unsigned int nr_grant_list;
 static unsigned long *grant_frames;
 static DEFINE_SPINLOCK(grant_lock);
 
+static PAGE_LIST_HEAD(balloon);
+static DEFINE_SPINLOCK(balloon_lock);
+
 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER| \
                  _PAGE_GUEST_KERNEL)
 #define COMPAT_L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
@@ -814,6 +817,113 @@ long pv_shim_cpu_down(void *data)
     return 0;
 }
 
+static unsigned long batch_memory_op(int cmd, struct page_list_head *list)
+{
+    struct xen_memory_reservation xmr = {
+        .domid = DOMID_SELF,
+    };
+    unsigned long pfns[64];
+    struct page_info *pg;
+    unsigned long done = 0;
+
+    set_xen_guest_handle(xmr.extent_start, pfns);
+    page_list_for_each ( pg, list )
+    {
+        pfns[xmr.nr_extents++] = page_to_mfn(pg);
+        if ( xmr.nr_extents == ARRAY_SIZE(pfns) || !page_list_next(pg, list) )
+        {
+            long nr = xen_hypercall_memory_op(cmd, &xmr);
+
+            done += nr > 0 ? nr : 0;
+            if ( nr != xmr.nr_extents )
+                break;
+            xmr.nr_extents = 0;
+        }
+    }
+
+    return done;
+}
+
+void pv_shim_online_memory(unsigned int nr, unsigned int order)
+{
+    struct page_info *page, *tmp;
+    PAGE_LIST_HEAD(list);
+
+    spin_lock(&balloon_lock);
+    page_list_for_each_safe ( page, tmp, &balloon )
+    {
+        if ( page->v.free.order != order )
+            continue;
+
+        page_list_del(page, &balloon);
+        page_list_add_tail(page, &list);
+        if ( !--nr )
+            break;
+    }
+    spin_unlock(&balloon_lock);
+
+    if ( nr )
+        gprintk(XENLOG_WARNING,
+                "failed to allocate %u extents of order %u for onlining\n",
+                nr, order);
+
+    nr = batch_memory_op(XENMEM_populate_physmap, &list);
+    while ( nr-- )
+    {
+        BUG_ON((page = page_list_remove_head(&list)) == NULL);
+        free_domheap_pages(page, order);
+    }
+
+    if ( !page_list_empty(&list) )
+    {
+        gprintk(XENLOG_WARNING,
+                "failed to online some of the memory regions\n");
+        spin_lock(&balloon_lock);
+        while ( (page = page_list_remove_head(&list)) != NULL )
+            page_list_add_tail(page, &balloon);
+        spin_unlock(&balloon_lock);
+    }
+}
+
+void pv_shim_offline_memory(unsigned int nr, unsigned int order)
+{
+    struct page_info *page;
+    PAGE_LIST_HEAD(list);
+
+    while ( nr-- )
+    {
+        page = alloc_domheap_pages(NULL, order, 0);
+        if ( !page )
+            break;
+
+        page_list_add_tail(page, &list);
+        page->v.free.order = order;
+    }
+
+    if ( nr + 1 )
+        gprintk(XENLOG_WARNING,
+                "failed to reserve %u extents of order %u for offlining\n",
+                nr + 1, order);
+
+
+    nr = batch_memory_op(XENMEM_decrease_reservation, &list);
+    spin_lock(&balloon_lock);
+    while ( nr-- )
+    {
+        BUG_ON((page = page_list_remove_head(&list)) == NULL);
+        page_list_add_tail(page, &balloon);
+    }
+    spin_unlock(&balloon_lock);
+
+    if ( !page_list_empty(&list) )
+    {
+        gprintk(XENLOG_WARNING,
+                "failed to offline some of the memory regions\n");
+        while ( (page = page_list_remove_head(&list)) != NULL )
+            free_domheap_pages(page, order);
+    }
+}
+
 domid_t get_dom0_domid(void)
 {
     uint32_t eax, ebx, ecx, edx;
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 5a1508a292..f06df8c8cf 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -29,6 +29,10 @@
 #include <public/memory.h>
 #include <xsm/xsm.h>
 
+#ifdef CONFIG_X86
+#include <asm/pv/shim.h>
+#endif
+
 struct memop_args {
     /* INPUT */
     struct domain *domain;           /* Domain to be affected. */
@@ -993,6 +997,11 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             return start_extent;
         }
 
+#ifdef CONFIG_X86
+        if ( pv_shim && op != XENMEM_decrease_reservation && !args.nr_done )
+            pv_shim_online_memory(args.nr_extents, args.extent_order);
+#endif
+
         switch ( op )
         {
         case XENMEM_increase_reservation:
@@ -1015,6 +1024,11 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
                     __HYPERVISOR_memory_op, "lh",
                     op | (rc << MEMOP_EXTENT_SHIFT), arg);
 
+#ifdef CONFIG_X86
+        if ( pv_shim && op == XENMEM_decrease_reservation )
+            pv_shim_offline_memory(args.nr_extents, args.extent_order);
+#endif
+
         break;
 
     case XENMEM_exchange:
diff --git a/xen/include/asm-x86/pv/shim.h b/xen/include/asm-x86/pv/shim.h
index d107a617a7..7174f6fc07 100644
--- a/xen/include/asm-x86/pv/shim.h
+++ b/xen/include/asm-x86/pv/shim.h
@@ -42,6 +42,8 @@ long pv_shim_grant_table_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) uop,
                             unsigned int count, bool compat);
 long pv_shim_cpu_up(void *data);
 long pv_shim_cpu_down(void *data);
+void pv_shim_online_memory(unsigned int nr, unsigned int order);
+void pv_shim_offline_memory(unsigned int nr, unsigned int order);
 domid_t get_dom0_domid(void);
 uint64_t pv_shim_mem(uint64_t avail);
 
@@ -88,6 +90,14 @@ static inline long pv_shim_cpu_down(void *data)
     ASSERT_UNREACHABLE();
     return 0;
 }
+static inline void pv_shim_online_memory(unsigned int nr, unsigned int order)
+{
+    ASSERT_UNREACHABLE();
+}
+static inline void pv_shim_offline_memory(unsigned int nr, unsigned int order)
+{
+    ASSERT_UNREACHABLE();
+}
 static inline domid_t get_dom0_domid(void)
 {
     return 0;
-- 
2.11.0
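For context on the hypercall pattern used by batch_memory_op() above: the shim hands its extents to the L0 hypervisor in chunks of 64 frames per XENMEM memory_op call and returns how many extents were actually accepted, so the callers can free or re-queue the remainder. The standalone sketch below restates that chunking pattern in isolation; it is not part of the patch, populate_frames_in_chunks() and CHUNK are illustrative names, and the exact header providing xen_hypercall_memory_op() is an assumption, while set_xen_guest_handle(), struct xen_memory_reservation and XENMEM_populate_physmap are the same interfaces the patch itself uses.

/*
 * Illustrative sketch only, not part of the patch: submit a frame array to
 * L0 via XENMEM_populate_physmap in fixed-size chunks, stopping on the
 * first short or failed batch and reporting how many extents succeeded.
 * populate_frames_in_chunks() and CHUNK are hypothetical names.
 */
#include <xen/types.h>
#include <asm/guest.h>       /* assumed home of xen_hypercall_memory_op() */
#include <public/memory.h>

#define CHUNK 64 /* mirrors the on-stack pfns[64] buffer in batch_memory_op() */

static unsigned long populate_frames_in_chunks(unsigned long *frames,
                                               unsigned long count)
{
    struct xen_memory_reservation xmr = {
        .domid = DOMID_SELF,
        .extent_order = 0,
    };
    unsigned long done = 0;

    while ( count )
    {
        unsigned long batch = count < CHUNK ? count : CHUNK;
        long nr;

        /* Point the hypercall at the next slice of the frame array. */
        set_xen_guest_handle(xmr.extent_start, frames + done);
        xmr.nr_extents = batch;

        nr = xen_hypercall_memory_op(XENMEM_populate_physmap, &xmr);
        if ( nr <= 0 )
            break;                /* L0 rejected the batch outright */

        done += nr;
        if ( (unsigned long)nr < batch )
            break;                /* partial success: report progress only */

        count -= batch;
    }

    return done; /* extents actually populated; caller handles the rest */
}

The same stop-on-short-batch behaviour is what lets pv_shim_online_memory() and pv_shim_offline_memory() put any unprocessed pages back on the balloon list or free them, respectively.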