WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] xsm: Fix xsm_mmu_* and xsm_update_va_mapp

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] xsm: Fix xsm_mmu_* and xsm_update_va_mapping hooks
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Wed, 13 Apr 2011 01:05:20 +0100
Delivery-date: Tue, 12 Apr 2011 17:08:42 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Stephen Smalley <sds@xxxxxxxxxxxxx>
# Date 1302616525 -3600
# Node ID 56a3b9c7367f67b07fa5cb709c883fd61bb88fbd
# Parent  dbb5ad1c51bd91a95d07bb896c416bcaab9e4455
xsm: Fix xsm_mmu_* and xsm_update_va_mapping hooks

This is an attempt to properly fix the hypervisor crash previously
described in
http://marc.info/?l=xen-devel&m=128396289707362&w=2

In looking into this issue, I think the proper fix is to move the
xsm_mmu_* and xsm_update_va_mapping hook calls later in the callers,
after more validation has been performed and the page_info struct is
readily available, and pass the page_info to the hooks. This patch
moves the xsm_mmu_normal_update, xsm_mmu_machphys_update and
xsm_update_va_mapping hook calls accordingly, and updates their
interfaces and hook function implementations.  This appears to resolve
the crashes for me.

Signed-off-by: Stephen Smalley <sds@xxxxxxxxxxxxx>
---


diff -r dbb5ad1c51bd -r 56a3b9c7367f xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Apr 12 14:00:49 2011 +0100
+++ b/xen/arch/x86/mm.c Tue Apr 12 14:55:25 2011 +0100
@@ -3476,9 +3476,6 @@
         {
             p2m_type_t p2mt;
 
-            rc = xsm_mmu_normal_update(d, pg_owner, req.val);
-            if ( rc )
-                break;
             rc = -EINVAL;
 
             req.ptr -= cmd;
@@ -3506,6 +3503,13 @@
                           (unsigned long)(req.ptr & ~PAGE_MASK));
             page = mfn_to_page(mfn);
 
+            rc = xsm_mmu_normal_update(d, req.val, page);
+            if ( rc ) {
+                unmap_domain_page_with_cache(va, &mapcache);
+                put_page(page);
+                break;
+            }
+
             if ( page_lock(page) )
             {
                 switch ( page->u.inuse.type_info & PGT_type_mask )
@@ -3668,10 +3672,6 @@
             mfn = req.ptr >> PAGE_SHIFT;
             gpfn = req.val;
 
-            rc = xsm_mmu_machphys_update(d, mfn);
-            if ( rc )
-                break;
-
             if ( unlikely(!get_page_from_pagenr(mfn, pg_owner)) )
             {
                 MEM_LOG("Could not get page for mach->phys update");
@@ -3686,6 +3686,10 @@
                 break;
             }
 
+            rc = xsm_mmu_machphys_update(d, mfn_to_page(mfn));
+            if ( rc )
+                break;
+
             set_gpfn_from_mfn(mfn, gpfn);
 
             paging_mark_dirty(pg_owner, mfn);
@@ -4306,10 +4310,6 @@
 
     perfc_incr(calls_to_update_va);
 
-    rc = xsm_update_va_mapping(d, pg_owner, val);
-    if ( rc )
-        return rc;
-
     rc = -EINVAL;
     pl1e = guest_map_l1e(v, va, &gl1mfn);
     if ( unlikely(!pl1e || !get_page_from_pagenr(gl1mfn, d)) )
@@ -4329,6 +4329,13 @@
         goto out;
     }
 
+    rc = xsm_update_va_mapping(d, val, gl1pg);
+    if ( rc ) {
+        page_unlock(gl1pg);
+        put_page(gl1pg);
+        goto out;
+    }
+
     rc = mod_l1_entry(pl1e, val, gl1mfn, 0, v, pg_owner);
 
     page_unlock(gl1pg);
diff -r dbb5ad1c51bd -r 56a3b9c7367f xen/include/xsm/xsm.h
--- a/xen/include/xsm/xsm.h     Tue Apr 12 14:00:49 2011 +0100
+++ b/xen/include/xsm/xsm.h     Tue Apr 12 14:55:25 2011 +0100
@@ -136,11 +136,12 @@
     int (*getidletime) (void);
     int (*machine_memory_map) (void);
     int (*domain_memory_map) (struct domain *d);
-    int (*mmu_normal_update) (struct domain *d, struct domain *f, 
-                                                                intpte_t fpte);
-    int (*mmu_machphys_update) (struct domain *d, unsigned long mfn);
-    int (*update_va_mapping) (struct domain *d, struct domain *f, 
-                                                            l1_pgentry_t pte);
+    int (*mmu_normal_update) (struct domain *d,
+                             intpte_t fpte, struct page_info *page);
+    int (*mmu_machphys_update) (struct domain *d, struct page_info *page);
+    int (*update_va_mapping) (struct domain *d,
+                             l1_pgentry_t pte,
+                             struct page_info *page);
     int (*add_to_physmap) (struct domain *d1, struct domain *d2);
     int (*sendtrigger) (struct domain *d);
     int (*test_assign_device) (uint32_t machine_bdf);
@@ -571,21 +572,22 @@
     return xsm_call(domain_memory_map(d));
 }
 
-static inline int xsm_mmu_normal_update (struct domain *d, struct domain *f, 
-                                                                intpte_t fpte)
+static inline int xsm_mmu_normal_update (struct domain *d,
+                                        intpte_t fpte, struct page_info *page)
 {
-    return xsm_call(mmu_normal_update(d, f, fpte));
+    return xsm_call(mmu_normal_update(d, fpte, page));
 }
 
-static inline int xsm_mmu_machphys_update (struct domain *d, unsigned long mfn)
+static inline int xsm_mmu_machphys_update (struct domain *d, struct page_info *page)
 {
-    return xsm_call(mmu_machphys_update(d, mfn));
+    return xsm_call(mmu_machphys_update(d, page));
 }
 
-static inline int xsm_update_va_mapping(struct domain *d, struct domain *f, 
-                                                            l1_pgentry_t pte)
+static inline int xsm_update_va_mapping(struct domain *d,
+                                       l1_pgentry_t pte,
+                                       struct page_info *page)
 {
-    return xsm_call(update_va_mapping(d, f, pte));
+    return xsm_call(update_va_mapping(d, pte, page));
 }
 
 static inline int xsm_add_to_physmap(struct domain *d1, struct domain *d2)
diff -r dbb5ad1c51bd -r 56a3b9c7367f xen/xsm/dummy.c
--- a/xen/xsm/dummy.c   Tue Apr 12 14:00:49 2011 +0100
+++ b/xen/xsm/dummy.c   Tue Apr 12 14:55:25 2011 +0100
@@ -405,19 +405,20 @@
     return 0;
 }
 
-static int dummy_mmu_normal_update (struct domain *d, struct domain *f, 
-                                                                intpte_t fpte)
+static int dummy_mmu_normal_update (struct domain *d,
+                                   intpte_t fpte, struct page_info *page)
 {
     return 0;
 }
 
-static int dummy_mmu_machphys_update (struct domain *d, unsigned long mfn)
+static int dummy_mmu_machphys_update (struct domain *d, struct page_info *page)
 {
     return 0;
 }
 
-static int dummy_update_va_mapping (struct domain *d, struct domain *f, 
-                                                            l1_pgentry_t pte)
+static int dummy_update_va_mapping (struct domain *d,
+                                   l1_pgentry_t pte,
+                                   struct page_info *page)
 {
     return 0;
 }
diff -r dbb5ad1c51bd -r 56a3b9c7367f xen/xsm/flask/hooks.c
--- a/xen/xsm/flask/hooks.c     Tue Apr 12 14:00:49 2011 +0100
+++ b/xen/xsm/flask/hooks.c     Tue Apr 12 14:55:25 2011 +0100
@@ -347,26 +347,6 @@
     return rc;
 }
 
-static int get_mfn_sid(unsigned long mfn, u32 *sid)
-{
-    int rc = 0;
-    struct page_info *page;
-
-    if ( mfn_valid(mfn) )
-    {
-        /*mfn is valid if this is a page that Xen is tracking!*/
-        page = mfn_to_page(mfn);
-        rc = get_page_sid(page, sid);
-    }
-    else
-    {
-        /*Possibly an untracked IO page?*/
-        rc = security_iomem_sid(mfn, sid);
-    }
-
-    return rc;    
-}
-
 static int flask_memory_adjust_reservation(struct domain *d1, struct domain *d2)
 {
     return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__ADJUST);
@@ -1006,12 +986,11 @@
     return domain_has_perm(current->domain, d, SECCLASS_MMU, MMU__MEMORYMAP);
 }
 
-static int flask_mmu_normal_update(struct domain *d, struct domain *f, 
-                                   intpte_t fpte)
+static int flask_mmu_normal_update(struct domain *d, 
+                                   intpte_t fpte, struct page_info *page)
 {
     int rc = 0;
     u32 map_perms = MMU__MAP_READ;
-    unsigned long fmfn;
     struct domain_security_struct *dsec;
     u32 fsid;
 
@@ -1020,42 +999,38 @@
     if ( l1e_get_flags(l1e_from_intpte(fpte)) & _PAGE_RW )
         map_perms |= MMU__MAP_WRITE;
 
-    fmfn = gmfn_to_mfn(f, l1e_get_pfn(l1e_from_intpte(fpte)));
-
-    rc = get_mfn_sid(fmfn, &fsid);
+    rc = get_page_sid(page, &fsid);
     if ( rc )
         return rc;
 
     return avc_has_perm(dsec->sid, fsid, SECCLASS_MMU, map_perms, NULL);
 }
 
-static int flask_mmu_machphys_update(struct domain *d, unsigned long mfn)
+static int flask_mmu_machphys_update(struct domain *d, struct page_info *page)
 {
     int rc = 0;
     u32 psid;
     struct domain_security_struct *dsec;
     dsec = d->ssid;
 
-    rc = get_mfn_sid(mfn, &psid);
+    rc = get_page_sid(page, &psid);
     if ( rc )
         return rc;
 
     return avc_has_perm(dsec->sid, psid, SECCLASS_MMU, MMU__UPDATEMP, NULL);
 }
 
-static int flask_update_va_mapping(struct domain *d, struct domain *f, 
-                                   l1_pgentry_t pte)
+static int flask_update_va_mapping(struct domain *d,
+                                   l1_pgentry_t pte, struct page_info *page)
 {
     int rc = 0;
     u32 psid;
     u32 map_perms = MMU__MAP_READ;
-    unsigned long mfn;
     struct domain_security_struct *dsec;
 
     dsec = d->ssid;
 
-    mfn = gmfn_to_mfn(f, l1e_get_pfn(pte));        
-    rc = get_mfn_sid(mfn, &psid);
+    rc = get_page_sid(page, &psid);
     if ( rc )
         return rc;
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-unstable] xsm: Fix xsm_mmu_* and xsm_update_va_mapping hooks, Xen patchbot-unstable <=