
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86: Change cache attributes of Xen 1:1 page mappings in response to guest mapping requests
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 09 Nov 2007 04:20:54 -0800
Delivery-date: Fri, 09 Nov 2007 04:25:00 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1194435845 0
# Node ID ff2edb1fd9f2376351f3d814f750ebe36e437821
# Parent  fbe7ed173314723f80f105e7e60fddd0bed77e5b
x86: Change cache attributes of Xen 1:1 page mappings in response to
guest mapping requests.
Based on a patch by Jan Beulich <jbeulich@xxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/ia64/xen/mm.c     |    8 +----
 xen/arch/x86/mm.c          |   71 +++++++++++++++++++++++++++++++++++++--------
 xen/common/grant_table.c   |    6 +--
 xen/include/asm-ia64/mm.h  |    3 -
 xen/include/asm-x86/mm.h   |   28 +++++++++--------
 xen/include/asm-x86/page.h |   10 ++++++
 6 files changed, 91 insertions(+), 35 deletions(-)
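
For readers skimming the diff, the following is a minimal standalone sketch (not Xen code) of what the changeset does: derive a 3-bit PAT/PCD/PWT attribute from the guest's L1 PTE flags, remember it in the frame's count_info, and remap Xen's own 1:1 (direct) mapping of that frame with the same attribute. The helper name track_cacheattr and the printf standing in for map_pages_to_xen() are illustrative only; the real update is done with cmpxchg() as shown in the xen/arch/x86/mm.c hunk below.

    #include <stdint.h>
    #include <stdio.h>

    /* Bits 26-28 of count_info hold the cached PAT/PCD/PWT attribute (per this patch). */
    #define PGC_cacheattr_base  26
    #define PGC_cacheattr_mask  (7U << PGC_cacheattr_base)

    struct page_info { uint32_t count_info; };

    /* Same bit shuffling as the helper added to asm-x86/page.h below. */
    static uint32_t pte_flags_to_cacheattr(uint32_t flags)
    {
        return ((flags >> 5) & 4) | ((flags >> 3) & 3);
    }

    /* Illustrative only: record the attribute and pretend to remap the 1:1 mapping. */
    static void track_cacheattr(struct page_info *pg, unsigned long mfn, uint32_t l1f)
    {
        uint32_t attr = pte_flags_to_cacheattr(l1f);
        uint32_t cur  = (pg->count_info >> PGC_cacheattr_base) & 7;

        if ( attr == cur )
            return;                 /* attributes already match, nothing to do */

        pg->count_info = (pg->count_info & ~PGC_cacheattr_mask) |
                         (attr << PGC_cacheattr_base);
        /* The real code calls map_pages_to_xen(mfn_to_virt(mfn), mfn, 1, ...) here. */
        printf("remap mfn %#lx with cacheattr %u\n", mfn, (unsigned)attr);
    }

    int main(void)
    {
        struct page_info pg = { .count_info = 1 };  /* refcount 1, attribute 0 (WB) */
        track_cacheattr(&pg, 0x1234, 0x18);         /* guest PTE with PCD|PWT set */
        return 0;
    }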

diff -r fbe7ed173314 -r ff2edb1fd9f2 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Wed Nov 07 09:22:31 2007 +0000
+++ b/xen/arch/ia64/xen/mm.c    Wed Nov 07 11:44:05 2007 +0000
@@ -2894,11 +2894,9 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(
     return 0;
 }
 
-int
-iomem_page_test(unsigned long mfn, struct page_info *page)
-{
-       return unlikely(!mfn_valid(mfn)) ||
-              unlikely(page_get_owner(page) == dom_io);
+int is_iomem_page(unsigned long mfn)
+{
+    return (!mfn_valid(mfn) || (page_get_owner(mfn_to_page(mfn)) == dom_io));
 }
 
 /*
diff -r fbe7ed173314 -r ff2edb1fd9f2 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Nov 07 09:22:31 2007 +0000
+++ b/xen/arch/x86/mm.c Wed Nov 07 11:44:05 2007 +0000
@@ -607,10 +607,9 @@ get_##level##_linear_pagetable(         
 }
 
 
-int iomem_page_test(unsigned long mfn, struct page_info *page)
-{
-    return unlikely(!mfn_valid(mfn)) ||
-        unlikely(page_get_owner(page) == dom_io);
+int is_iomem_page(unsigned long mfn)
+{
+    return (!mfn_valid(mfn) || (page_get_owner(mfn_to_page(mfn)) == dom_io));
 }
 
 
@@ -620,19 +619,19 @@ get_page_from_l1e(
 {
     unsigned long mfn = l1e_get_pfn(l1e);
     struct page_info *page = mfn_to_page(mfn);
+    uint32_t l1f = l1e_get_flags(l1e);
     int okay;
 
-    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
+    if ( !(l1f & _PAGE_PRESENT) )
         return 1;
 
-    if ( unlikely(l1e_get_flags(l1e) & l1_disallow_mask(d)) )
-    {
-        MEM_LOG("Bad L1 flags %x",
-                l1e_get_flags(l1e) & l1_disallow_mask(d));
+    if ( unlikely(l1f & l1_disallow_mask(d)) )
+    {
+        MEM_LOG("Bad L1 flags %x", l1f & l1_disallow_mask(d));
         return 0;
     }
 
-    if ( iomem_page_test(mfn, page) )
+    if ( is_iomem_page(mfn) )
     {
         /* DOMID_IO reverts to caller for privilege checks. */
         if ( d == dom_io )
@@ -657,7 +656,7 @@ get_page_from_l1e(
      * contribute to writeable mapping refcounts.  (This allows the
      * qemu-dm helper process in dom0 to map the domain's memory without
      * messing up the count of "real" writable mappings.) */
-    okay = (((l1e_get_flags(l1e) & _PAGE_RW) && 
+    okay = (((l1f & _PAGE_RW) && 
              !(unlikely(paging_mode_external(d) && (d != current->domain))))
             ? get_page_and_type(page, d, PGT_writable_page)
             : get_page(page, d));
@@ -667,6 +666,36 @@ get_page_from_l1e(
                 " for dom%d",
                 mfn, get_gpfn_from_mfn(mfn),
                 l1e_get_intpte(l1e), d->domain_id);
+    }
+    else if ( (pte_flags_to_cacheattr(l1f) !=
+               ((page->count_info >> PGC_cacheattr_base) & 7)) &&
+              !is_iomem_page(mfn) )
+    {
+        uint32_t x, nx, y = page->count_info;
+        uint32_t cacheattr = pte_flags_to_cacheattr(l1f);
+
+        if ( is_xen_heap_frame(page) )
+        {
+            if ( (l1f & _PAGE_RW) &&
+                 !(unlikely(paging_mode_external(d) &&
+                            (d != current->domain))) )
+                put_page_type(page);
+            put_page(page);
+            MEM_LOG("Attempt to change cache attributes of Xen heap page");
+            return 0;
+        }
+
+        while ( ((y >> PGC_cacheattr_base) & 7) != cacheattr )
+        {
+            x  = y;
+            nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
+            y  = cmpxchg(&page->count_info, x, nx);
+        }
+
+#ifdef __x86_64__
+        map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
+                         PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
+#endif
     }
 
     return okay;
@@ -1825,6 +1854,24 @@ int get_page_type(struct page_info *page
     }
 
     return 1;
+}
+
+
+void cleanup_page_cacheattr(struct page_info *page)
+{
+    uint32_t cacheattr = (page->count_info >> PGC_cacheattr_base) & 7;
+
+    if ( likely(cacheattr == 0) )
+        return;
+
+    page->count_info &= ~PGC_cacheattr_mask;
+
+    BUG_ON(is_xen_heap_frame(page));
+
+#ifdef __x86_64__
+    map_pages_to_xen((unsigned long)page_to_virt(page), page_to_mfn(page),
+                     1, PAGE_HYPERVISOR);
+#endif
 }
 
 
@@ -3803,7 +3850,7 @@ static void __memguard_change_range(void
 {
     unsigned long _p = (unsigned long)p;
     unsigned long _l = (unsigned long)l;
-    unsigned long flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;
+    unsigned int flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;
 
     /* Ensure we are dealing with a page-aligned whole number of pages. */
     ASSERT((_p&~PAGE_MASK) == 0);
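
The count_info update in the get_page_from_l1e() hunk above uses the usual lock-free read-modify-write idiom: recompute the new value from the last observed one and retry cmpxchg() until no other CPU has raced. A user-space rendition of that loop (a sketch only, with GCC's __sync_val_compare_and_swap() standing in for Xen's cmpxchg(), and set_cacheattr() a hypothetical name) looks like this:

    #include <stdint.h>
    #include <stdio.h>

    #define PGC_cacheattr_base  26
    #define PGC_cacheattr_mask  (7U << PGC_cacheattr_base)

    /* Atomically replace the 3-bit cache attribute stored in *count_info,
     * leaving the reference count and the flag bits above it untouched. */
    static void set_cacheattr(volatile uint32_t *count_info, uint32_t cacheattr)
    {
        uint32_t x, nx, y = *count_info;

        while ( ((y >> PGC_cacheattr_base) & 7) != cacheattr )
        {
            x  = y;
            nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
            /* On failure, y is the freshly observed value and the loop retries. */
            y  = __sync_val_compare_and_swap(count_info, x, nx);
        }
    }

    int main(void)
    {
        uint32_t count_info = 5;        /* reference count 5, attribute 0 */
        set_cacheattr(&count_info, 3);  /* switch to attribute 3 (PCD|PWT) */
        printf("count_info = %#x\n", count_info);  /* prints 0xc000005 */
        return 0;
    }

The loop exits as soon as the stored attribute matches the requested one, so a concurrent change to the reference-count bits simply costs another iteration rather than being lost.
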
diff -r fbe7ed173314 -r ff2edb1fd9f2 xen/common/grant_table.c
--- a/xen/common/grant_table.c  Wed Nov 07 09:22:31 2007 +0000
+++ b/xen/common/grant_table.c  Wed Nov 07 11:44:05 2007 +0000
@@ -332,7 +332,7 @@ __gnttab_map_grant_ref(
     if ( op->flags & GNTMAP_host_map ) 
     {
         /* Could be an iomem page for setting up permission */
-        if ( iomem_page_test(frame, mfn_to_page(frame)) )
+        if ( is_iomem_page(frame) )
         {
             is_iomem = 1;
             if ( iomem_permit_access(ld, frame, frame) )
@@ -527,7 +527,7 @@ __gnttab_unmap_common(
                                                   op->flags)) < 0 )
                 goto unmap_out;
         }
-        else if ( iomem_page_test(op->frame, mfn_to_page(op->frame)) &&
+        else if ( is_iomem_page(op->frame) &&
                   iomem_access_permitted(ld, op->frame, op->frame) )
         {
             if ( (rc = iomem_deny_access(ld, op->frame, op->frame)) < 0 )
@@ -1651,7 +1651,7 @@ gnttab_release_mappings(
                 BUG_ON(!(act->pin & GNTPIN_hstw_mask));
                 act->pin -= GNTPIN_hstw_inc;
 
-                if ( iomem_page_test(act->frame, mfn_to_page(act->frame)) &&
+                if ( is_iomem_page(act->frame) &&
                      iomem_access_permitted(rd, act->frame, act->frame) )
                     rc = iomem_deny_access(rd, act->frame, act->frame);
                 else 
diff -r fbe7ed173314 -r ff2edb1fd9f2 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Wed Nov 07 09:22:31 2007 +0000
+++ b/xen/include/asm-ia64/mm.h Wed Nov 07 11:44:05 2007 +0000
@@ -185,8 +185,7 @@ static inline int get_page(struct page_i
     return 1;
 }
 
-/* Decide whether this page looks like iomem or real memory */
-int iomem_page_test(unsigned long mfn, struct page_info *page);
+int is_iomem_page(unsigned long mfn);
 
 extern void put_page_type(struct page_info *page);
 extern int get_page_type(struct page_info *page, u32 type);
diff -r fbe7ed173314 -r ff2edb1fd9f2 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Wed Nov 07 09:22:31 2007 +0000
+++ b/xen/include/asm-x86/mm.h  Wed Nov 07 11:44:05 2007 +0000
@@ -84,25 +84,23 @@ struct page_info
 #define _PGT_pae_xen_l2     26
 #define PGT_pae_xen_l2      (1U<<_PGT_pae_xen_l2)
 
- /* 16-bit count of uses of this frame as its current type. */
-#define PGT_count_mask      ((1U<<16)-1)
+ /* 26-bit count of uses of this frame as its current type. */
+#define PGT_count_mask      ((1U<<26)-1)
 
  /* Cleared when the owning guest 'frees' this page. */
 #define _PGC_allocated      31
 #define PGC_allocated       (1U<<_PGC_allocated)
  /* Set on a *guest* page to mark it out-of-sync with its shadow */
-#define _PGC_out_of_sync     30
+#define _PGC_out_of_sync    30
 #define PGC_out_of_sync     (1U<<_PGC_out_of_sync)
  /* Set when is using a page as a page table */
-#define _PGC_page_table      29
+#define _PGC_page_table     29
 #define PGC_page_table      (1U<<_PGC_page_table)
- /* 29-bit count of references to this frame. */
-#define PGC_count_mask      ((1U<<29)-1)
-
-/* We trust the slab allocator in slab.c, and our use of it. */
-#define PageSlab(page)     (1)
-#define PageSetSlab(page)   ((void)0)
-#define PageClearSlab(page) ((void)0)
+ /* 3-bit PAT/PCD/PWT cache-attribute hint. */
+#define PGC_cacheattr_base  26
+#define PGC_cacheattr_mask  (7U<<PGC_cacheattr_base)
+ /* 26-bit count of references to this frame. */
+#define PGC_count_mask      ((1U<<26)-1)
 
 #define is_xen_heap_frame(pfn) ({                                       \
     paddr_t maddr = page_to_maddr(pfn);                                 \
@@ -147,6 +145,8 @@ void free_page_type(struct page_info *pa
 void free_page_type(struct page_info *page, unsigned long type);
 int _shadow_mode_refcounts(struct domain *d);
 
+void cleanup_page_cacheattr(struct page_info *page);
+
 static inline void put_page(struct page_info *page)
 {
     u32 nx, x, y = page->count_info;
@@ -158,7 +158,10 @@ static inline void put_page(struct page_
     while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
 
     if ( unlikely((nx & PGC_count_mask) == 0) )
+    {
+        cleanup_page_cacheattr(page);
         free_domheap_page(page);
+    }
 }
 
 
@@ -196,8 +199,7 @@ static inline int get_page(struct page_i
     return 1;
 }
 
-/* Decide whether this page looks like iomem or real memory */
-int iomem_page_test(unsigned long mfn, struct page_info *page);
+int is_iomem_page(unsigned long mfn);
 
 void put_page_type(struct page_info *page);
 int  get_page_type(struct page_info *page, unsigned long type);
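
With the three cache-attribute bits carved out of the general reference count (which shrinks from 29 to 26 bits), the x86 count_info word is now split as: bit 31 PGC_allocated, bit 30 PGC_out_of_sync, bit 29 PGC_page_table, bits 28-26 the cache attribute, and bits 25-0 the reference count. A standalone sanity check of that layout (mirroring the definitions in the hunk above, not additional Xen code) is:

    #include <assert.h>
    #include <stdint.h>

    /* x86 count_info layout after this patch:
     *   bit  31     PGC_allocated
     *   bit  30     PGC_out_of_sync
     *   bit  29     PGC_page_table
     *   bits 28-26  PAT/PCD/PWT cache attribute
     *   bits 25-0   reference count
     */
    #define PGC_allocated       (1U << 31)
    #define PGC_out_of_sync     (1U << 30)
    #define PGC_page_table      (1U << 29)
    #define PGC_cacheattr_base  26
    #define PGC_cacheattr_mask  (7U << PGC_cacheattr_base)
    #define PGC_count_mask      ((1U << 26) - 1)

    int main(void)
    {
        /* The fields must tile the 32-bit word without overlapping. */
        assert((PGC_allocated | PGC_out_of_sync | PGC_page_table |
                PGC_cacheattr_mask | PGC_count_mask) == 0xffffffffU);
        assert((PGC_cacheattr_mask & PGC_count_mask) == 0);
        return 0;
    }
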
diff -r fbe7ed173314 -r ff2edb1fd9f2 xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h        Wed Nov 07 09:22:31 2007 +0000
+++ b/xen/include/asm-x86/page.h        Wed Nov 07 11:44:05 2007 +0000
@@ -360,6 +360,16 @@ int map_pages_to_xen(
     unsigned int flags);
 void destroy_xen_mappings(unsigned long v, unsigned long e);
 
+/* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
+static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
+{
+    return ((flags >> 5) & 4) | ((flags >> 3) & 3);
+}
+static inline uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
+{
+    return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #define PFN_DOWN(x)   ((x) >> PAGE_SHIFT)
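
As a quick check of the two helpers above: in a 4K x86 PTE the relevant bits are PWT (bit 3), PCD (bit 4) and PAT (bit 7), and the helpers pack them into a contiguous 3-bit value and back. The X_PAGE_* constants below are local stand-ins for Xen's _PAGE_* definitions, used only to keep the sketch self-contained:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-ins for the x86 PTE bits: PWT = bit 3, PCD = bit 4, PAT = bit 7 (4K pages). */
    #define X_PAGE_PWT  (1U << 3)
    #define X_PAGE_PCD  (1U << 4)
    #define X_PAGE_PAT  (1U << 7)

    static uint32_t pte_flags_to_cacheattr(uint32_t flags)
    {
        return ((flags >> 5) & 4) | ((flags >> 3) & 3);
    }

    static uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
    {
        return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
    }

    int main(void)
    {
        /* PAT|PCD|PWT in the PTE becomes the 3-bit value 7; PCD|PWT becomes 3. */
        assert(pte_flags_to_cacheattr(X_PAGE_PAT | X_PAGE_PCD | X_PAGE_PWT) == 7);
        assert(pte_flags_to_cacheattr(X_PAGE_PCD | X_PAGE_PWT) == 3);

        /* The conversion round-trips for all eight attribute values. */
        for ( uint32_t attr = 0; attr < 8; attr++ )
            assert(pte_flags_to_cacheattr(cacheattr_to_pte_flags(attr)) == attr);

        return 0;
    }

This packed value is what get_page_from_l1e() ORs into PAGE_HYPERVISOR (via cacheattr_to_pte_flags()) when it remaps the frame in Xen's 1:1 area.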

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
