# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1198237319 -32400
# Node ID 7f1e2deaf58ced913fc50b23cb561f88c7d89eaf
# Parent 5db0f48672ca10b003132842965e5fb0e9eaeb0a
allocate shared_info page from domain heap.

Hyperprivops access the shared_info page with psr.ic = 0.  They reach
the page via current_psr_i_addr, which we can point at shared_info_va,
an address mapped by IA64_TR_SHARED_INFO.  Thus we can allocate the
page from the domain heap.

PATCHNAME: shared_info_from_domain_heap

Signed-off-by: Isaku Yamahata

diff -r 5db0f48672ca -r 7f1e2deaf58c xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c	Fri Dec 21 17:45:05 2007 +0900
+++ b/xen/arch/ia64/xen/domain.c	Fri Dec 21 20:41:59 2007 +0900
@@ -211,8 +211,9 @@ void schedule_tail(struct vcpu *prev)
 	load_region_regs(current);
 	ia64_set_pta(vcpu_pta(current));
 	vcpu_load_kernel_regs(current);
-	__ia64_per_cpu_var(current_psr_i_addr) = &current->domain->
-		shared_info->vcpu_info[current->vcpu_id].evtchn_upcall_mask;
+	__ia64_per_cpu_var(current_psr_i_addr) =
+		(uint8_t*)(current->domain->arch.shared_info_va +
+			   INT_ENABLE_OFFSET(current));
 	__ia64_per_cpu_var(current_psr_ic_addr) = (int *)
 		(current->domain->arch.shared_info_va + XSI_PSR_IC_OFS);
 	migrate_timer(&current->arch.hlt_timer, current->processor);
@@ -279,8 +280,9 @@ void context_switch(struct vcpu *prev, s
 		vcpu_set_next_timer(current);
 		if (vcpu_timer_expired(current))
 			vcpu_pend_timer(current);
-		__ia64_per_cpu_var(current_psr_i_addr) = &nd->shared_info->
-			vcpu_info[current->vcpu_id].evtchn_upcall_mask;
+		__ia64_per_cpu_var(current_psr_i_addr) =
+			(uint8_t*)(nd->arch.shared_info_va +
+				   INT_ENABLE_OFFSET(current));
 		__ia64_per_cpu_var(current_psr_ic_addr) =
 			(int *)(nd->arch.shared_info_va + XSI_PSR_IC_OFS);
 		/* steal time accounting */
@@ -556,6 +558,7 @@ int arch_domain_create(struct domain *d)
 int arch_domain_create(struct domain *d)
 {
 	int i;
+	struct page_info *page = NULL;
 
 	// the following will eventually need to be negotiated dynamically
 	d->arch.shared_info_va = DEFAULT_SHAREDINFO_ADDR;
@@ -575,9 +578,11 @@ int arch_domain_create(struct domain *d)
 #endif
 	if (tlb_track_create(d) < 0)
 		goto fail_nomem1;
-	d->shared_info = alloc_xenheap_pages(get_order_from_shift(XSI_SHIFT));
-	if (d->shared_info == NULL)
-		goto fail_nomem;
+	page = alloc_domheap_pages(NULL, get_order_from_shift(XSI_SHIFT), 0);
+	if (page == NULL)
+		goto fail_nomem;
+	d->shared_info = page_to_virt(page);
+	BUG_ON(d->shared_info == NULL);
 	memset(d->shared_info, 0, XSI_SIZE);
 	for (i = 0; i < XSI_SIZE; i += PAGE_SIZE)
 		share_xen_page_with_guest(virt_to_page((char *)d->shared_info + i),
@@ -619,17 +624,18 @@ fail_nomem1:
 fail_nomem1:
 	if (d->arch.mm.pgd != NULL)
 		pgd_free(d->arch.mm.pgd);
+	if (page != NULL)
+		free_domheap_pages(page, get_order_from_shift(XSI_SHIFT));
+	return -ENOMEM;
+}
+
+void arch_domain_destroy(struct domain *d)
+{
+	mm_final_teardown(d);
+
 	if (d->shared_info != NULL)
-		free_xenheap_pages(d->shared_info, get_order_from_shift(XSI_SHIFT));
-	return -ENOMEM;
-}
-
-void arch_domain_destroy(struct domain *d)
-{
-	mm_final_teardown(d);
-
-	if (d->shared_info != NULL)
-		free_xenheap_pages(d->shared_info, get_order_from_shift(XSI_SHIFT));
+		free_domheap_pages(virt_to_page(d->shared_info),
+				   get_order_from_shift(XSI_SHIFT));
 
 	tlb_track_destroy(d);
 
@@ -1700,6 +1706,8 @@ domain_set_shared_info_va (unsigned long
 	VCPU(v, interrupt_mask_addr) = (unsigned char *)va +
 	                               INT_ENABLE_OFFSET(v);
 
+	__ia64_per_cpu_var(current_psr_i_addr) =
+		(uint8_t*)(va + INT_ENABLE_OFFSET(current));
 	__ia64_per_cpu_var(current_psr_ic_addr) = (int *)(va + XSI_PSR_IC_OFS);
 
 	/* Remap the shared pages. */
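
The sketch below (not part of the patch) spells out the pointer setup the
change relies on, using only identifiers that appear in the diff; the helper
name psr_i_addr_sketch and its placement are illustrative assumptions, not
real Xen code.

/*
 * Illustrative sketch only, not part of the patch.  With psr.ic = 0 a
 * hyperprivop must dereference current_psr_i_addr through a pinned
 * translation, so the pointer is derived from shared_info_va (kept mapped
 * by IA64_TR_SHARED_INFO) instead of a xenheap address.  INT_ENABLE_OFFSET(v)
 * is assumed, from the lines this patch removes, to be the byte offset of
 * vcpu_info[v->vcpu_id].evtchn_upcall_mask inside the shared_info area.
 * Assumes the usual xen/sched.h and ia64 domain.h definitions are in scope.
 */
static inline uint8_t *psr_i_addr_sketch(struct vcpu *v)
{
	return (uint8_t *)(v->domain->arch.shared_info_va +
			   INT_ENABLE_OFFSET(v));
}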