xen-ia64-devel

[Xen-ia64-devel] Re: [Xen-devel] [PATCH 3/3] continuable destroy domain: ia64 part

To: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>, xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-ia64-devel] Re: [Xen-devel] [PATCH 3/3] continuable destroy domain: ia64 part
From: Akio Takebe <takebe_akio@xxxxxxxxxxxxxx>
Date: Mon, 03 Sep 2007 19:56:22 +0900
Cc: KRYSANS@xxxxxxxxxx, xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
Delivery-date: Mon, 03 Sep 2007 03:55:36 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
In-reply-to: <20070831140609.GA23214%yamahata@xxxxxxxxxxxxx>
List-help: <mailto:xen-ia64-devel-request@lists.xensource.com?subject=help>
List-id: Discussion of the ia64 port of Xen <xen-ia64-devel.lists.xensource.com>
List-post: <mailto:xen-ia64-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=unsubscribe>
References: <20070831140609.GA23214%yamahata@xxxxxxxxxxxxx>
Sender: xen-ia64-devel-bounces@xxxxxxxxxxxxxxxxxxx
Hi, Isaku

After applying your patch, mm_teardown() is changed to return an int,
but I couldn't find the hunk that updates the body of mm_teardown() itself.

Is the following patch right?

Signed-off-by: Akio Takebe <takebe_akio@xxxxxxxxxxxxxx>

diff -r 3b50a7e52ff2 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Fri Aug 31 17:00:11 2007 +0100
+++ b/xen/arch/ia64/xen/mm.c    Mon Sep 03 20:00:05 2007 +0900
@@ -338,14 +338,21 @@ mm_teardown(struct domain* d)
     volatile pgd_t* pgd;
 
     if (mm->pgd == NULL)
-        return;
+        return 0;
 
     pgd = pgd_offset(mm, 0);
     for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
-        if (!pgd_present(*pgd)) // acquire semantics
+        unsigned long cur_offset = i << PGDIR_SHIFT;
+        if (mm_teardown_can_skip(d, cur_offset + PGDIR_SIZE))
             continue;
-        mm_teardown_pgd(d, pgd, i << PGDIR_SHIFT);
-    }
+        if (!pgd_present(*pgd)) { // acquire semantics
+            mm_teardown_update_offset(d, cur_offset);
+            continue;
+        }
+        if (mm_teardown_pgd(d, pgd, cur_offset))
+            return -EAGAIN;
+    }
+    return 0;
 }
 
 static void
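
For reference, this is roughly what the whole function becomes with the hunk
above applied on top of your patch. I pieced it together from the hunk context
and from the int prototype / mm declaration visible in your patch, so the local
declarations outside the hunk are guessed; treat it as a sketch, not a
compile-tested version:

int
mm_teardown(struct domain* d)
{
    struct mm_struct* mm = &d->arch.mm;
    unsigned long i;
    volatile pgd_t* pgd;

    if (mm->pgd == NULL)
        return 0;

    pgd = pgd_offset(mm, 0);
    for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
        unsigned long cur_offset = i << PGDIR_SHIFT;
        /* this PGD entry's whole range was torn down by a previous call */
        if (mm_teardown_can_skip(d, cur_offset + PGDIR_SIZE))
            continue;
        if (!pgd_present(*pgd)) { // acquire semantics
            mm_teardown_update_offset(d, cur_offset);
            continue;
        }
        /* preempted somewhere below; the caller retries the hypercall */
        if (mm_teardown_pgd(d, pgd, cur_offset))
            return -EAGAIN;
    }
    return 0;
}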


Best Regards,

Akio Takebe

># HG changeset patch
># User yamahata@xxxxxxxxxxxxx
># Date 1188280280 -32400
># Node ID b4fe65fdc26d5b56057f3711e7d8bf4a3617cfa8
># Parent  b8f3785f15bde5da96b4ffd1cde3e677d54abf90
>Implement ia64 continuable domain destroy.
>PATCHNAME: implement_ia64_continuable_domain_destroy
>
>Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
>
>diff -r b8f3785f15bd -r b4fe65fdc26d xen/arch/ia64/xen/domain.c
>--- a/xen/arch/ia64/xen/domain.c       Fri Aug 31 19:52:21 2007 +0900
>+++ b/xen/arch/ia64/xen/domain.c       Tue Aug 28 14:51:20 2007 +0900
>@@ -563,6 +563,7 @@ int arch_domain_create(struct domain *d)
>               goto fail_nomem;
> 
>       memset(&d->arch.mm, 0, sizeof(d->arch.mm));
>+      d->arch.mm_teardown_offset = 0;
> 
>       if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL)
>           goto fail_nomem;
>@@ -938,12 +939,15 @@ static void relinquish_memory(struct dom
> 
> int domain_relinquish_resources(struct domain *d)
> {
>+    int ret;
>     /* Relinquish guest resources for VT-i domain. */
>     if (d->vcpu[0] && VMX_DOMAIN(d->vcpu[0]))
>           vmx_relinquish_guest_resources(d);
> 
>     /* Tear down shadow mode stuff. */
>-    mm_teardown(d);
>+    ret = mm_teardown(d);
>+    if (ret != 0)
>+        return ret;
> 
>     /* Relinquish every page of memory. */
>     relinquish_memory(d, &d->xenpage_list);
>diff -r b8f3785f15bd -r b4fe65fdc26d xen/arch/ia64/xen/mm.c
>--- a/xen/arch/ia64/xen/mm.c   Fri Aug 31 19:52:21 2007 +0900
>+++ b/xen/arch/ia64/xen/mm.c   Tue Aug 28 14:51:20 2007 +0900
>@@ -215,6 +215,18 @@ alloc_dom_xen_and_dom_io(void)
>     BUG_ON(dom_io == NULL);
> }
> 
>+static int
>+mm_teardown_can_skip(struct domain* d, unsigned long offset)
>+{
>+    return d->arch.mm_teardown_offset > offset;
>+}
>+
>+static void
>+mm_teardown_update_offset(struct domain* d, unsigned long offset)
>+{
>+    d->arch.mm_teardown_offset = offset;
>+}
>+
> static void
> mm_teardown_pte(struct domain* d, volatile pte_t* pte, unsigned long offset)
> {
>@@ -252,46 +264,73 @@ mm_teardown_pte(struct domain* d, volati
>     }
> }
> 
>-static void
>+static int
> mm_teardown_pmd(struct domain* d, volatile pmd_t* pmd, unsigned long offset)
> {
>     unsigned long i;
>     volatile pte_t* pte = pte_offset_map(pmd, offset);
> 
>     for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
>-        if (!pte_present(*pte)) // acquire semantics
>+        unsigned long cur_offset = offset + (i << PAGE_SHIFT);
>+        if (mm_teardown_can_skip(d, cur_offset + PAGE_SIZE))
>             continue;
>-        mm_teardown_pte(d, pte, offset + (i << PAGE_SHIFT));
>-    }
>-}
>-
>-static void
>+        if (!pte_present(*pte)) { // acquire semantics
>+            mm_teardown_update_offset(d, cur_offset);
>+            continue;
>+        }
>+        mm_teardown_update_offset(d, cur_offset);
>+        mm_teardown_pte(d, pte, cur_offset);
>+        if (hypercall_preempt_check())
>+            return -EAGAIN;
>+    }
>+    return 0;
>+}
>+
>+static int
> mm_teardown_pud(struct domain* d, volatile pud_t *pud, unsigned long offset)
> {
>     unsigned long i;
>     volatile pmd_t *pmd = pmd_offset(pud, offset);
> 
>     for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
>-        if (!pmd_present(*pmd)) // acquire semantics
>+        unsigned long cur_offset = offset + (i << PMD_SHIFT);
>+        if (mm_teardown_can_skip(d, cur_offset + PMD_SIZE))
>             continue;
>-        mm_teardown_pmd(d, pmd, offset + (i << PMD_SHIFT));
>-    }
>-}
>-
>-static void
>+        if (!pmd_present(*pmd)) { // acquire semantics
>+            mm_teardown_update_offset(d, cur_offset);
>+            continue;
>+        }
>+        if (mm_teardown_pmd(d, pmd, cur_offset))
>+            return -EAGAIN;
>+    }
>+    return 0;
>+}
>+
>+static int
> mm_teardown_pgd(struct domain* d, volatile pgd_t *pgd, unsigned long offset)
> {
>     unsigned long i;
>     volatile pud_t *pud = pud_offset(pgd, offset);
> 
>     for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
>-        if (!pud_present(*pud)) // acquire semantics
>+        unsigned long cur_offset = offset + (i << PUD_SHIFT);
>+#ifndef __PAGETABLE_PUD_FOLDED
>+        if (mm_teardown_can_skip(d, cur_offset + PUD_SIZE))
>             continue;
>-        mm_teardown_pud(d, pud, offset + (i << PUD_SHIFT));
>-    }
>-}
>-
>-void
>+#endif
>+        if (!pud_present(*pud)) { // acquire semantics
>+#ifndef __PAGETABLE_PUD_FOLDED
>+            mm_teardown_update_offset(d, cur_offset);
>+#endif
>+            continue;
>+        }
>+        if (mm_teardown_pud(d, pud, cur_offset))
>+            return -EAGAIN;
>+    }
>+    return 0;
>+}
>+
>+int
> mm_teardown(struct domain* d)
> {
>     struct mm_struct* mm = &d->arch.mm;
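
As an aside, in case it helps other readers of the series: the control flow of
the continuable teardown can be shown without any page-table details. The toy
program below is not Xen code; the names (saved, can_skip, budget) are made up
and merely stand in for d->arch.mm_teardown_offset, mm_teardown_can_skip() and
hypercall_preempt_check(). The idea is the same, though: the walk always
restarts from the top, whole subtrees that end below the saved offset are
skipped, and -EAGAIN makes the caller re-invoke the teardown, just like the
continued destroy-domain hypercall.

/* Toy illustration only -- not Xen code. */
#include <errno.h>
#include <stdio.h>

#define OUTER       8
#define INNER       16
#define INNER_SIZE  16                  /* offsets covered by one outer entry */
#define BUDGET      10                  /* pretend preemption fires after 10 leaves */

struct dom {
    unsigned long saved;                /* plays the role of d->arch.mm_teardown_offset */
};

static int can_skip(struct dom *d, unsigned long end)
{
    return d->saved > end;              /* subtree ends below the saved offset */
}

static int teardown_inner(struct dom *d, unsigned long base, int *budget)
{
    unsigned long i;

    for (i = 0; i < INNER; i++) {
        unsigned long cur = base + i;
        if (can_skip(d, cur + 1))
            continue;                   /* leaf already handled by an earlier call */
        d->saved = cur;                 /* record progress, then do the work */
        /* ... tear down leaf 'cur' here; redoing it must be harmless ... */
        if (--*budget == 0)
            return -EAGAIN;             /* stand-in for hypercall_preempt_check() */
    }
    return 0;
}

static int teardown(struct dom *d)
{
    unsigned long i;
    int budget = BUDGET;

    for (i = 0; i < OUTER; i++) {
        unsigned long base = i * INNER_SIZE;
        if (can_skip(d, base + INNER_SIZE))
            continue;                   /* whole subtree finished in an earlier call */
        if (teardown_inner(d, base, &budget))
            return -EAGAIN;
    }
    return 0;
}

int main(void)
{
    struct dom d = { 0 };
    int calls = 1;

    while (teardown(&d) == -EAGAIN)     /* the caller just keeps re-invoking, like */
        calls++;                        /* the continued destroy-domain hypercall */
    printf("done after %d calls\n", calls);
    return 0;
}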


_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel
