# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1193122067 -32400
# Node ID 34f728eb165f5d6af1222d175c7ffb5485853f8d
# Parent  d105b8c30b7dc81cfaa6564c204b9d275fa7fee5
Prevent soft lockup during VTi domain destruction by making
relinquish_memory() continuable.

It was assumed that mm_teardown() frees most of the pages on page_list,
so that the list passed to relinquish_memory() would be short. That
assumption does not hold for VTi domains, because qemu-dm maps all of
the domain's pages. To avoid the soft lockup message, make
relinquish_memory() continuable.

PATCHNAME: make_relinquish_memory_continuable

Signed-off-by: Isaku Yamahata

diff -r d105b8c30b7d -r 34f728eb165f xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c	Mon Oct 22 13:57:08 2007 -0600
+++ b/xen/arch/ia64/xen/domain.c	Tue Oct 23 15:47:47 2007 +0900
@@ -584,7 +584,9 @@ int arch_domain_create(struct domain *d)
 		goto fail_nomem;
 
 	memset(&d->arch.mm, 0, sizeof(d->arch.mm));
+	d->arch.relres = RELRES_not_started;
 	d->arch.mm_teardown_offset = 0;
+	INIT_LIST_HEAD(&d->arch.relmem_list);
 
 	if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL)
 		goto fail_nomem;
@@ -1495,13 +1497,14 @@ int arch_set_info_guest(struct vcpu *v,
 	return rc;
 }
 
-static void relinquish_memory(struct domain *d, struct list_head *list)
+static int relinquish_memory(struct domain *d, struct list_head *list)
 {
     struct list_head *ent;
     struct page_info *page;
 #ifndef __ia64__
     unsigned long x, y;
 #endif
+    int ret = 0;
 
     /* Use a recursive lock, as we may enter 'free_domheap_page'. */
     spin_lock_recursive(&d->page_alloc_lock);
@@ -1514,6 +1517,7 @@ static void relinquish_memory(struct dom
         {
             /* Couldn't get a reference -- someone is freeing this page. */
             ent = ent->next;
+            list_move_tail(&page->list, &d->arch.relmem_list);
             continue;
         }
 
@@ -1550,35 +1554,72 @@ static void relinquish_memory(struct dom
         /* Follow the list chain and /then/ potentially free the page. */
         ent = ent->next;
         BUG_ON(get_gpfn_from_mfn(page_to_mfn(page)) != INVALID_M2P_ENTRY);
+        list_move_tail(&page->list, &d->arch.relmem_list);
         put_page(page);
+
+        if (hypercall_preempt_check()) {
+            ret = -EAGAIN;
+            goto out;
+        }
     }
 
+    list_splice_init(&d->arch.relmem_list, list);
+
+ out:
     spin_unlock_recursive(&d->page_alloc_lock);
+    return ret;
 }
 
 int domain_relinquish_resources(struct domain *d)
 {
-	int ret;
-	/* Relinquish guest resources for VT-i domain. */
-	if (d->arch.is_vti)
-		vmx_relinquish_guest_resources(d);
-
-	/* Tear down shadow mode stuff. */
-	ret = mm_teardown(d);
-	if (ret != 0)
-		return ret;
-
-	/* Relinquish every page of memory. */
-	relinquish_memory(d, &d->xenpage_list);
-	relinquish_memory(d, &d->page_list);
-
-	if (d->arch.is_vti && d->arch.sal_data)
-		xfree(d->arch.sal_data);
-
-	/* Free page used by xen oprofile buffer */
-	free_xenoprof_pages(d);
-
-	return 0;
+	int ret = 0;
+
+	switch (d->arch.relres) {
+	case RELRES_not_started:
+		/* Relinquish guest resources for VT-i domain. */
+		if (d->arch.is_vti)
+			vmx_relinquish_guest_resources(d);
+		d->arch.relres = RELRES_mm_teardown;
+		/* fallthrough */
+
+	case RELRES_mm_teardown:
+		/* Tear down shadow mode stuff. */
+		ret = mm_teardown(d);
+		if (ret != 0)
+			return ret;
+		d->arch.relres = RELRES_xen;
+		/* fallthrough */
+
+	case RELRES_xen:
+		/* Relinquish every xen page of memory. */
+		ret = relinquish_memory(d, &d->xenpage_list);
+		if (ret != 0)
+			return ret;
+		d->arch.relres = RELRES_dom;
+		/* fallthrough */
+
+	case RELRES_dom:
+		/* Relinquish every domain page of memory. */
+		ret = relinquish_memory(d, &d->page_list);
+		if (ret != 0)
+			return ret;
+		d->arch.relres = RELRES_done;
+		/* fallthrough */
+
+	case RELRES_done:
+		break;
+
+	default:
+		BUG();
+	}
+
+	if (d->arch.is_vti && d->arch.sal_data)
+		xfree(d->arch.sal_data);
+
+	/* Free page used by xen oprofile buffer */
+	free_xenoprof_pages(d);
+
+	return 0;
 }
 
 unsigned long
diff -r d105b8c30b7d -r 34f728eb165f xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h	Mon Oct 22 13:57:08 2007 -0600
+++ b/xen/include/asm-ia64/domain.h	Tue Oct 23 15:47:47 2007 +0900
@@ -192,7 +192,17 @@ struct arch_domain {
 #endif
 
 	/* for domctl_destroy_domain continuation */
+	enum {
+		RELRES_not_started,
+		RELRES_mm_teardown,
+		RELRES_xen,
+		RELRES_dom,
+		RELRES_done,
+	} relres;
+	/* Continuable mm_teardown() */
 	unsigned long mm_teardown_offset;
+	/* Continuable domain_relinquish_resources() */
+	struct list_head relmem_list;
 };
 #define INT_ENABLE_OFFSET(v) \
 	(sizeof(vcpu_info_t) * (v)->vcpu_id + \
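
For readers unfamiliar with the idiom above, here is a minimal,
self-contained sketch of the same continuation pattern outside Xen: a
long teardown loop parks each processed element on a side list, polls a
preemption predicate, and returns -EAGAIN so the caller re-invokes it
until it finally returns 0. Everything here is illustrative, not Xen
API: preempt_check() is a toy stand-in for hypercall_preempt_check(),
and the "done" list plays the role of d->arch.relmem_list.

/*
 * Illustrative sketch of a preemptible teardown loop.
 * Build: cc -o relinquish relinquish.c
 */
#include <errno.h>
#include <stdio.h>

struct item {
	struct item *next;
	int id;
};

struct teardown {
	struct item *pending;	/* items not yet released */
	struct item *done;	/* items already handled, parked aside */
};

/* Toy stand-in for hypercall_preempt_check(): pretend a preemption
 * request arrives after every second item. */
static int preempt_check(void)
{
	static int calls;
	return (++calls % 2) == 0;
}

/* Release one slice of items; return -EAGAIN when "preempted" so the
 * caller can invoke us again later, 0 when the list is empty. */
static int relinquish_items(struct teardown *t)
{
	while (t->pending != NULL) {
		struct item *it = t->pending;

		t->pending = it->next;

		/* Park the item on the done list before letting go of
		 * it, mirroring list_move_tail() before put_page() in
		 * the patch above. */
		it->next = t->done;
		t->done = it;
		printf("released item %d\n", it->id);

		if (preempt_check())
			return -EAGAIN;	/* resume from t->pending */
	}
	return 0;
}

int main(void)
{
	struct teardown t = { NULL, NULL };
	struct item items[5];
	int i, passes = 0;

	/* Build a small pending list. */
	for (i = 0; i < 5; i++) {
		items[i].id = i;
		items[i].next = t.pending;
		t.pending = &items[i];
	}

	/* Re-invoke until done, the way domain_relinquish_resources()
	 * is re-entered after -EAGAIN. */
	while (relinquish_items(&t) == -EAGAIN)
		passes++;

	printf("finished after %d extra passes\n", passes);
	return 0;
}

The key property, as in the patch, is that the resume state lives in a
structure that outlives each call (struct arch_domain there, struct
teardown here), so no progress is lost between invocations; the patch
additionally splices relmem_list back onto the original list with
list_splice_init() once the loop completes.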