On Sun, Mar 01, 2009 at 05:12:25PM +0800, Zhang, Yang wrote:
> >Isaku Yamahata wrote:
> >- The pages for the vhpt are allocated by alloc_domheap_pages(NULL, ...),
> >  so that memory is not accounted to the domain.
> >  Adjusting the domain memory size therefore doesn't make sense. Just drop it.
> The previous version already allocates the pages for the vhpt with
> alloc_domheap_pages(NULL, ...); I did not change that. And I think that
> passing NULL as the first argument means the pages don't belong to the domain.
As you described above, you used alloc_domheap_pages(NULL, ...), which means
that the pages for the vhpt aren't accounted to a given domain.
So the hunk in tools/python/xen/xend/image.py doesn't make sense, does it?
Or have you actually seen issues without the modification to image.py?
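
To illustrate the distinction (a rough sketch only, based on the generic
domheap allocator semantics; the order/flags values here are illustrative):

    /* Anonymous allocation: the pages are not charged to any domain,
     * so the toolstack's memory accounting never sees them. */
    struct page_info *anon = alloc_domheap_pages(NULL, order, 0);

    /* Domain-accounted allocation: the pages are assigned to d and
     * counted in d->tot_pages, subject to the d->max_pages limit. */
    struct page_info *owned = alloc_domheap_pages(d, order, 0);

Only in the second form would the image.py reservation have anything to
account for.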
> >- Is vhpt_page[] really necessary?
> > How about setting hcb->hash directly instead of temporarily
> > keeping it in vhpt_page[]?
> For extensibility and more legible logic, I think it is better to use
> vhpt_page[].
Hmm, the patch makes the vhpt allocation/freeing interface convoluted.
The gist of your patch is to make init_domain_vhpt() use preallocated
pages instead of allocating them itself. So what should be done is
not only to add the preallocation logic, but also to revise
the related functions.
Revise init_domain_vhpt() (and free_domain_vhpt()?). Maybe the underlying
functions, thash_alloc() and thash_free(), need to be refactored as well.
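
Something along these lines, as a rough sketch only (the extra prealloc
parameter is hypothetical, and the body is abbreviated):

    /* Let the caller hand in a preallocated page explicitly instead of
     * dispatching on the "what" string inside thash_alloc(). */
    int thash_alloc(thash_cb_t *hcb, u64 sz_log2, char *what,
                    struct page_info *prealloc)
    {
        void *vbase;
        u64 sz = 1UL << sz_log2;
        struct page_info *page = prealloc;

        /* Fall back to allocating here when nothing was preallocated. */
        if (page == NULL)
            page = alloc_domheap_pages(NULL, sz_log2 + 1 - PAGE_SHIFT, 0);
        if (page == NULL) {
            printk("Not enough contiguous memory(%ldKB) for init_domain_%s\n",
                   sz >> (10 - 1), what);
            return -ENOMEM;
        }
        vbase = page_to_virt(page);
        memset(vbase, 0, sz);
        hcb->hash = vbase;
        thash_init(hcb, sz_log2);
        return 0;
    }

Then init_domain_vhpt() can pass down the page saved in vhpt_page[] (or
NULL), the vtlb path simply passes NULL, and thash_alloc() no longer
needs to know about struct vcpu at all.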
thanks,
>
> Fix the issue that a guest cannot be launched when using the balloon driver.
>
> - In the current version, when using the balloon driver to balloon memory,
>   the memory for the vhpt is not ballooned.
>   The vhpt needs contiguous memory, but it is allocated only after the
>   memory for the guest has been allocated. So the vhpt usually cannot
>   get contiguous memory and the guest cannot be launched. This patch
>   changes the sequence: before the memory for the guest is allocated,
>   contiguous memory is reserved for the vhpt, and when a vcpu boots,
>   the reserved memory is used for its vhpt.
>
> Signed-off-by: Yang Zhang <yang.zhang@xxxxxxxxx>
>
> diff -r b432c632ebe8 tools/python/xen/xend/image.py
> --- a/tools/python/xen/xend/image.py Fri Feb 13 19:11:38 2009 +0900
> +++ b/tools/python/xen/xend/image.py Sun Mar 01 03:28:28 2009 -0500
> @@ -870,6 +870,9 @@ class IA64_HVM_ImageHandler(HVMImageHand
>          extra_pages = 1024 + 5
>          mem_kb += extra_pages * page_kb
>          mem_kb += self.vramsize
> +        # each vcpu needs 16MB for the maximum vhpt size
> +        vhpt_kb = 16 * 1024
> +        mem_kb += self.vm.getVCpuCount() * vhpt_kb
>          return mem_kb
>
>      def getRequiredInitialReservation(self):
> diff -r b432c632ebe8 xen/arch/ia64/vmx/vmmu.c
> --- a/xen/arch/ia64/vmx/vmmu.c Fri Feb 13 19:11:38 2009 +0900
> +++ b/xen/arch/ia64/vmx/vmmu.c Sun Mar 01 03:28:28 2009 -0500
> @@ -62,7 +62,7 @@ static int init_domain_vhpt(struct vcpu
>      else
>          size = canonicalize_vhpt_size(size);
>
> -    rc = thash_alloc(&(v->arch.vhpt), size, "vhpt");
> +    rc = thash_alloc(v, size, "vhpt");
>      v->arch.arch_vmx.mpta = v->arch.vhpt.pta.val;
>      return rc;
> }
> @@ -70,8 +70,10 @@ static int init_domain_vhpt(struct vcpu
>
> static void free_domain_vhpt(struct vcpu *v)
> {
> -    if (v->arch.vhpt.hash)
> +    if (v->arch.vhpt.hash) {
>          thash_free(&(v->arch.vhpt));
> +        v->domain->arch.hvm_domain.vhpt_page[v->vcpu_id] = NULL;
> +    }
> }
>
> int init_domain_tlb(struct vcpu *v)
> @@ -82,7 +84,7 @@ int init_domain_tlb(struct vcpu *v)
>      if (rc)
>          return rc;
>
> -    rc = thash_alloc(&(v->arch.vtlb), default_vtlb_sz, "vtlb");
> +    rc = thash_alloc(v, default_vtlb_sz, "vtlb");
>      if (rc) {
>          free_domain_vhpt(v);
>          return rc;
> diff -r b432c632ebe8 xen/arch/ia64/vmx/vmx_hypercall.c
> --- a/xen/arch/ia64/vmx/vmx_hypercall.c Fri Feb 13 19:11:38 2009 +0900
> +++ b/xen/arch/ia64/vmx/vmx_hypercall.c Sun Mar 01 03:28:28 2009 -0500
> @@ -147,6 +147,9 @@ do_hvm_op(unsigned long op, XEN_GUEST_HA
>          a.value = current->domain->domain_id;
>          rc = a.value ? -EINVAL : 0; /* no stub domain support */
>          break;
> +    case HVM_PARAM_VHPT_SIZE:
> +        rc = domain_pre_alloc_vhpt(d, a.value);
> +        break;
>      default:
>          /* nothing */
>          break;
> diff -r b432c632ebe8 xen/arch/ia64/vmx/vmx_init.c
> --- a/xen/arch/ia64/vmx/vmx_init.c Fri Feb 13 19:11:38 2009 +0900
> +++ b/xen/arch/ia64/vmx/vmx_init.c Sun Mar 01 03:28:28 2009 -0500
> @@ -541,6 +541,7 @@ vmx_relinquish_guest_resources(struct do
>      for_each_vcpu(d, v)
>          vmx_release_assist_channel(v);
>
> +    domain_pre_free_vhpt(d);
>      vacpi_relinquish_resources(d);
>
>      vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.ioreq);
> @@ -661,3 +662,63 @@ void vmx_do_resume(struct vcpu *v)
>          }
>      }
> }
> +
> +static inline int get_vhpt_size(unsigned long size)
> +{
> +    switch (size) {
> +    case 0:
> +        size = DEFAULT_VHPT_SZ;
> +        break;
> +    case 1 ... 15:
> +        size = 15;
> +        break;
> +    case 16 ... (IA64_GRANULE_SHIFT - 1):
> +        break;
> +    default:
> +        size = IA64_GRANULE_SHIFT - 1;
> +    }
> +    return size;
> +}
> +int domain_pre_alloc_vhpt(struct domain *d, unsigned long size)
> +{
> +    struct vcpu *v;
> +    struct page_info *pg = NULL;
> +    int rc = 0;
> +
> +    size = get_vhpt_size(size);
> +    for_each_vcpu(d, v) {
> +        if (!v->is_initialised) {
> +            pg = alloc_domheap_pages(NULL, (size + 1 - PAGE_SHIFT), 0);
> +            if (pg == NULL) {
> +                printk("Not enough contiguous memory(%ldKB) to preallocate vhpt\n",
> +                       (1UL << size) >> 9);
> +                rc = -ENOMEM;
> +                goto fail;
> +            }
> +            d->arch.hvm_domain.vhpt_page[v->vcpu_id] = pg;
> +        }
> +    }
> +    return rc;
> +
> +fail:
> +    domain_pre_free_vhpt(d);
> +    return rc;
> +}
> +
> +void domain_pre_free_vhpt(struct domain *d)
> +{
> +    struct vcpu *v;
> +    unsigned long size;
> +    struct page_info *pg = NULL;
> +
> +    size = d->arch.hvm_domain.params[HVM_PARAM_VHPT_SIZE];
> +    size = get_vhpt_size(size);
> +
> +    for_each_vcpu(d, v) {
> +        pg = d->arch.hvm_domain.vhpt_page[v->vcpu_id];
> +        if (pg != NULL) {
> +            free_domheap_pages(pg, size + 1 - PAGE_SHIFT);
> +            v->arch.vhpt.hash = NULL;
> +        }
> +    }
> +}
> diff -r b432c632ebe8 xen/arch/ia64/vmx/vtlb.c
> --- a/xen/arch/ia64/vmx/vtlb.c Fri Feb 13 19:11:38 2009 +0900
> +++ b/xen/arch/ia64/vmx/vtlb.c Sun Mar 01 03:28:28 2009 -0500
> @@ -723,13 +723,30 @@ static void thash_init(thash_cb_t *hcb,
> hcb->cch_freelist = NULL;
> }
>
> -int thash_alloc(thash_cb_t *hcb, u64 sz_log2, char *what)
> +int thash_alloc(struct vcpu *v, u64 sz_log2, char *what)
> {
>      struct page_info *page;
>      void * vbase;
> +    thash_cb_t *hcb;
>      u64 sz = 1UL << sz_log2;
>
> -    page = alloc_domheap_pages(NULL, (sz_log2 + 1 - PAGE_SHIFT), 0);
> +    if (!strcmp(what, "vhpt")) {
> +        hcb = &(v->arch.vhpt);
> +        page = v->domain->arch.hvm_domain.vhpt_page[v->vcpu_id];
> +        v->domain->arch.hvm_domain.vhpt_page[v->vcpu_id] = NULL;
> +        /* no preallocated page; try to allocate one now */
> +        if (page == NULL)
> +            page = alloc_domheap_pages(NULL, (sz_log2 + 1 - PAGE_SHIFT), 0);
> +
> +        if (page != NULL)
> +            printk("alloc %s, addr=0x%lx, vcpuid=%d, domainid=%d\n",
> +                   what, (u64)page, v->vcpu_id, v->domain->domain_id);
> +    }
> +    else {
> +        hcb = &(v->arch.vtlb);
> +        page = alloc_domheap_pages(NULL, (sz_log2 + 1 - PAGE_SHIFT), 0);
> +    }
> +
>      if (page == NULL) {
>          printk("Not enough contiguous memory(%ldKB) for init_domain_%s\n",
>                 sz >> (10 - 1), what);
> diff -r b432c632ebe8 xen/include/asm-ia64/vmmu.h
> --- a/xen/include/asm-ia64/vmmu.h Fri Feb 13 19:11:38 2009 +0900
> +++ b/xen/include/asm-ia64/vmmu.h Sun Mar 01 03:28:28 2009 -0500
> @@ -136,7 +136,7 @@ typedef struct thash_cb {
> /*
>  * Allocate and initialize internal control data before service.
>  */
> -extern int thash_alloc(thash_cb_t *hcb, u64 sz, char *what);
> +extern int thash_alloc(struct vcpu *v, u64 sz, char *what);
>
> extern void thash_free(thash_cb_t *hcb);
>
> diff -r b432c632ebe8 xen/include/asm-ia64/vmx_platform.h
> --- a/xen/include/asm-ia64/vmx_platform.h Fri Feb 13 19:11:38 2009 +0900
> +++ b/xen/include/asm-ia64/vmx_platform.h Sun Mar 01 03:28:28 2009 -0500
> @@ -46,6 +46,8 @@ typedef struct virtual_platform_def {
>      /* Pass-through VT-d */
>      struct hvm_irq irq;
>      struct hvm_iommu hvm_iommu;
> +    /* preallocated per-vcpu vhpt pages */
> +    struct page_info *vhpt_page[MAX_VIRT_CPUS];
> } vir_plat_t;
>
> static inline int __fls(uint32_t word)
--
yamahata
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel