[Xen-changelog] [IA64] domU destroy

# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 551f7935f79a842341ec5baf5fccb9c56bd5473b
# Parent  c644eb4049abedac4bbe83c0497b592b1f4bb531
[IA64] domU destroy

Signed-off-by: Akio Takebe <takebe_akio@xxxxxxxxxxxxxx>
Signed-off-by: Masaki Kanno <kanno.masaki@xxxxxxxxxxxxxx>

diff -r c644eb4049ab -r 551f7935f79a xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Fri Mar 10 15:23:39 2006
+++ b/xen/arch/ia64/xen/domain.c        Fri Mar 10 15:25:54 2006
@@ -49,6 +49,8 @@
 #include <asm/pal.h>
 #include <asm/vhpt.h>
 #include <public/hvm/ioreq.h>
+#include <asm/tlbflush.h>
+#include <asm/regionreg.h>
 
 #define CONFIG_DOMAIN0_CONTIGUOUS
 unsigned long dom0_start = -1L;
@@ -69,10 +71,7 @@
 /* FIXME: where should these declarations live? */
 extern void domain_pend_keyboard_interrupt(int);
 extern long platform_is_hp_ski(void);
-extern unsigned long allocate_metaphysical_rr(void);
-extern int allocate_rid_range(struct domain *, unsigned long);
 extern void sync_split_caches(void);
-extern void init_all_rr(struct vcpu *);
 extern void serial_input_init(void);
 
 static void init_switch_stack(struct vcpu *v);
@@ -80,9 +79,33 @@
 /* this belongs in include/asm, but there doesn't seem to be a suitable place */
 void arch_domain_destroy(struct domain *d)
 {
-       printf("arch_domain_destroy: not implemented\n");
-       //free_page((unsigned long)d->mm.perdomain_pt);
-       free_xenheap_page(d->shared_info);
+       struct page *page;
+       struct list_head *ent, *prev;
+
+       if (d->arch.mm != NULL && d->arch.mm->pgd != NULL)
+       {
+               list_for_each ( ent, &d->arch.mm->pt_list )
+               {
+                       page = list_entry(ent, struct page, list);
+                       prev = ent->prev;
+                       list_del(ent);
+                       free_xenheap_page(page_to_virt(page));
+                       ent = prev;
+               }
+               pgd_free(d->arch.mm->pgd);
+       }
+       if (d->arch.mm != NULL)
+               xfree(d->arch.mm);
+       if (d->shared_info != NULL)
+               free_xenheap_page(d->shared_info);
+
+       deallocate_rid_range(d);
+
+       /* Is a full TLB flush really necessary here? */
+       flush_tlb_all();
+
+       /* Is a VHPT flush really necessary here? */
+       vhpt_flush();
 }
 
 static void default_idle(void)
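
The teardown loop in arch_domain_destroy above frees entries while
walking the list, saving ent->prev before list_del() and stepping back
so the loop's own advance lands on the right node. A minimal sketch of
the same traversal written with the list_for_each_safe helper from the
shared list.h, which caches the next pointer up front (same types and
field names as the patch; illustration only, not part of the changeset):

    struct list_head *ent, *next;
    struct page *page;

    /* Walk pt_list and free every page-table page; 'next' is captured
     * before list_del(), so unlinking the current entry is safe. */
    list_for_each_safe(ent, next, &d->arch.mm->pt_list) {
            page = list_entry(ent, struct page, list);
            list_del(ent);
            free_xenheap_page(page_to_virt(page));
    }
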
@@ -187,6 +210,8 @@
 
 void free_vcpu_struct(struct vcpu *v)
 {
+       if (v->arch.privregs != NULL)
+               free_xenheap_pages(v->arch.privregs, get_order(sizeof(mapped_regs_t)));
        free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
 }
 
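
free_xenheap_pages() must be passed the same order that was used at
allocation time, hence the get_order(sizeof(mapped_regs_t)) above. A
sketch of the allocation site this presumably pairs with (the
allocation is not part of this changeset, so the exact call is an
assumption):

    /* Assumed matching allocation: same order as the free above. */
    v->arch.privregs = alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
    if (v->arch.privregs == NULL)
            return -ENOMEM;
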
@@ -239,6 +264,7 @@
        if ((d->arch.mm = xmalloc(struct mm_struct)) == NULL)
            goto fail_nomem;
        memset(d->arch.mm, 0, sizeof(*d->arch.mm));
+       INIT_LIST_HEAD(&d->arch.mm->pt_list);
 
        if ((d->arch.mm->pgd = pgd_alloc(d->arch.mm)) == NULL)
            goto fail_nomem;
@@ -310,10 +336,74 @@
        return 0;
 }
 
+static void relinquish_memory(struct domain *d, struct list_head *list)
+{
+    struct list_head *ent;
+    struct page      *page;
+#ifndef __ia64__
+    unsigned long     x, y;
+#endif
+
+    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
+    spin_lock_recursive(&d->page_alloc_lock);
+    ent = list->next;
+    while ( ent != list )
+    {
+        page = list_entry(ent, struct page, list);
+        /* Grab a reference to the page so it won't disappear from under us. */
+        if ( unlikely(!get_page(page, d)) )
+        {
+            /* Couldn't get a reference -- someone is freeing this page. */
+            ent = ent->next;
+            continue;
+        }
+
+        if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
+            put_page_and_type(page);
+
+        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+            put_page(page);
+
+#ifndef __ia64__
+        /*
+         * Forcibly invalidate base page tables at this point to break circular
+         * 'linear page table' references. This is okay because MMU structures
+         * are not shared across domains and this domain is now dead. Thus base
+         * tables are not in use so a non-zero count means circular reference.
+         */
+        y = page->u.inuse.type_info;
+        for ( ; ; )
+        {
+            x = y;
+            if ( likely((x & (PGT_type_mask|PGT_validated)) !=
+                        (PGT_base_page_table|PGT_validated)) )
+                break;
+
+            y = cmpxchg(&page->u.inuse.type_info, x, x & ~PGT_validated);
+            if ( likely(y == x) )
+            {
+                free_page_type(page, PGT_base_page_table);
+                break;
+            }
+        }
+#endif
+
+        /* Follow the list chain and /then/ potentially free the page. */
+        ent = ent->next;
+        put_page(page);
+    }
+
+    spin_unlock_recursive(&d->page_alloc_lock);
+}
+
 void domain_relinquish_resources(struct domain *d)
 {
-       /* FIXME */
-       printf("domain_relinquish_resources: not implemented\n");
+    /* Relinquish every page of memory. */
+
+    /* xenpage_list is not used on ia64. */
+    BUG_ON(!list_empty(&d->xenpage_list));
+
+    relinquish_memory(d, &d->page_list);
 }
 
 // heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
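
The relinquish_memory loop above (apparently adapted from the x86
version, hence the #ifndef __ia64__ block) depends on a careful
ordering: the cursor must step past the current entry before the final
put_page(), since dropping the last reference may free the page and
unlink it from the list. The invariant, in outline (same calls as
above, comments added for illustration):

    ent = list->next;
    while (ent != list) {
            page = list_entry(ent, struct page, list);
            if (unlikely(!get_page(page, d))) {
                    ent = ent->next;    /* page already being freed */
                    continue;
            }
            /* ... drop the _PGT_pinned and _PGC_allocated refs ... */
            ent = ent->next;            /* advance off the page first,  */
            put_page(page);             /* then let it disappear safely */
    }
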
@@ -389,7 +479,7 @@
 struct page * assign_new_domain_page(struct domain *d, unsigned long mpaddr)
 {
        struct mm_struct *mm = d->arch.mm;
-       struct page *p = (struct page *)0;
+       struct page *pt, *p = (struct page *)0;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
@@ -401,16 +491,28 @@
        }
        pgd = pgd_offset(mm,mpaddr);
        if (pgd_none(*pgd))
+       {
                pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
+               pt = maddr_to_page(pgd_val(*pgd));
+               list_add_tail(&pt->list, &d->arch.mm->pt_list);
+       }
 
        pud = pud_offset(pgd, mpaddr);
        if (pud_none(*pud))
+       {
                pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
+               pt = maddr_to_page(pud_val(*pud));
+               list_add_tail(&pt->list, &d->arch.mm->pt_list);
+       }
 
        pmd = pmd_offset(pud, mpaddr);
        if (pmd_none(*pmd))
+       {
                pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
 //             pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
+               pt = maddr_to_page(pmd_val(*pmd));
+               list_add_tail(&pt->list, &d->arch.mm->pt_list);
+       }
 
        pte = pte_offset_map(pmd, mpaddr);
        if (pte_none(*pte)) {
@@ -443,6 +545,7 @@
 void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
 {
        struct mm_struct *mm = d->arch.mm;
+       struct page *pt;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
@@ -454,16 +557,28 @@
        }
        pgd = pgd_offset(mm,mpaddr);
        if (pgd_none(*pgd))
+       {
                pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
+               pt = maddr_to_page(pgd_val(*pgd));
+               list_add_tail(&pt->list, &d->arch.mm->pt_list);
+       }
 
        pud = pud_offset(pgd, mpaddr);
        if (pud_none(*pud))
+       {
                pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
+               pt = maddr_to_page(pud_val(*pud));
+               list_add_tail(&pt->list, &d->arch.mm->pt_list);
+       }
 
        pmd = pmd_offset(pud, mpaddr);
        if (pmd_none(*pmd))
+       {
                pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
 //             pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
+               pt = maddr_to_page(pmd_val(*pmd));
+               list_add_tail(&pt->list, &d->arch.mm->pt_list);
+       }
 
        pte = pte_offset_map(pmd, mpaddr);
        if (pte_none(*pte)) {
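
assign_new_domain_page and assign_domain_page now repeat the same
three-step pattern at every level: populate the missing pgd/pud/pmd
entry, convert the new table's machine address to its struct page, and
queue that page on pt_list so arch_domain_destroy can free it later. A
hypothetical helper (pt_list_track below is not in the patch) would
fold each repeated pair into one call:

    /* Hypothetical helper -- not part of this changeset -- recording a
     * freshly populated page-table page for teardown via pt_list. */
    static void pt_list_track(struct domain *d, unsigned long maddr)
    {
            list_add_tail(&maddr_to_page(maddr)->list, &d->arch.mm->pt_list);
    }

    /* e.g. in assign_domain_page: */
    if (pgd_none(*pgd)) {
            pgd_populate(mm, pgd, pud_alloc_one(mm, mpaddr));
            pt_list_track(d, pgd_val(*pgd));
    }
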
diff -r c644eb4049ab -r 551f7935f79a xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c     Fri Mar 10 15:23:39 2006
+++ b/xen/arch/ia64/xen/regionreg.c     Fri Mar 10 15:25:54 2006
@@ -157,7 +157,6 @@
        int rid_block_end = d->arch.ending_rid >> IA64_MIN_IMPL_RID_BITS;
        int rid_block_start = d->arch.starting_rid >> IA64_MIN_IMPL_RID_BITS;
 
-       return 1;  // KLUDGE ALERT
        //
        // not all domains will have allocated RIDs (physical mode loaders for instance)
        //
diff -r c644eb4049ab -r 551f7935f79a xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Fri Mar 10 15:23:39 2006
+++ b/xen/include/asm-ia64/domain.h     Fri Mar 10 15:25:54 2006
@@ -102,6 +102,8 @@
 #endif
        spinlock_t page_table_lock;             /* Protects task page tables and mm->rss */
 
+       struct list_head pt_list;               /* List of page-table pages */
+
        struct list_head mmlist;                /* List of all active mm's.  These are globally strung
                                                 * together off init_mm.mmlist, and are protected
                                                 * by mmlist_lock
diff -r c644eb4049ab -r 551f7935f79a xen/include/asm-ia64/regionreg.h
--- a/xen/include/asm-ia64/regionreg.h  Fri Mar 10 15:23:39 2006
+++ b/xen/include/asm-ia64/regionreg.h  Fri Mar 10 15:25:54 2006
@@ -64,4 +64,13 @@
 // since vmMangleRID is symmetric, use it for unmangling also
 #define vmUnmangleRID(x)       vmMangleRID(x)
 
+extern unsigned long allocate_metaphysical_rr(void);
+
+struct domain;
+extern int allocate_rid_range(struct domain *d, unsigned long ridbits);
+extern int deallocate_rid_range(struct domain *d);
+
+struct vcpu;
+extern void init_all_rr(struct vcpu *v);
+
 #endif         /* !_REGIONREG_H_ */
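
With these prototypes exported from regionreg.h, the local extern
declarations removed from domain.c pair up across the domain lifetime:
a RID range is reserved at creation and returned by arch_domain_destroy
via deallocate_rid_range(). A rough sketch of that pairing, assuming
the usual nonzero-on-success convention for allocate_rid_range (the
creation-side call is not shown in this changeset):

    #include <asm/regionreg.h>

    /* Creation: reserve a block of region IDs for the domain ... */
    if (!allocate_rid_range(d, ridbits))
            goto fail_nomem;        /* no free RID block */

    /* ... destruction: return it, as arch_domain_destroy now does. */
    deallocate_rid_range(d);
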

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
