[Xen-changelog] Forward port of I/O-page fix in 2.0 series.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Forward port of I/O-page fix in 2.0 series.
From: BitKeeper Bot <riel@xxxxxxxxxxx>
Date: Thu, 14 Apr 2005 22:30:46 +0000
Delivery-date: Thu, 14 Apr 2005 23:03:17 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: Xen Development List <xen-devel@xxxxxxxxxxxxxxxxxxx>
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
ChangeSet 1.1299, 2005/04/14 23:30:46+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        Forward port of I/O-page fix in 2.0 series.
        Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>



 arch/x86/mm.c            |   69 ++++++++++++++++++++++++++++++++++-------------
 arch/x86/shadow.c        |    2 -
 common/grant_table.c     |    2 -
 include/asm-x86/page.h   |    9 ------
 include/asm-x86/shadow.h |   10 +++---
 5 files changed, 58 insertions(+), 34 deletions(-)
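Illustrative note (not part of the patch): the new e820 loop in xen/arch/x86/mm.c below converts a byte range [addr, addr+size) into a pfn range that covers every page the range touches, even when neither addr nor size is page-aligned. A minimal standalone sketch of that arithmetic, assuming 4KB x86 pages and hypothetical example values:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
    /* Hypothetical non-RAM e820 entry that straddles a page boundary. */
    unsigned long addr = 0x000f1800UL;   /* starts mid-page              */
    unsigned long size = 0x00001000UL;   /* 4KB long, but spans 2 pages  */

    unsigned long pfn     = addr >> PAGE_SHIFT;
    unsigned long nr_pfns = (size + (addr & ~PAGE_MASK) + ~PAGE_MASK)
                            >> PAGE_SHIFT;

    /* Prints pfn=0xf1 nr_pfns=0x2: both touched frames get marked as I/O. */
    printf("pfn=%#lx nr_pfns=%#lx\n", pfn, nr_pfns);
    return 0;
}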


diff -Nru a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c 2005-04-14 19:03:29 -04:00
+++ b/xen/arch/x86/mm.c 2005-04-14 19:03:29 -04:00
@@ -171,6 +171,9 @@
 {
     extern void subarch_init_memory(struct domain *);
 
+    unsigned long i, j, pfn, nr_pfns;
+    struct pfn_info *page;
+
     memset(percpu_info, 0, sizeof(percpu_info));
 
     /*
@@ -184,13 +187,42 @@
 
     /*
      * Initialise our DOMID_IO domain.
-     * This domain owns no pages but is considered a special case when
-     * mapping I/O pages, as the mappings occur at the priv of the caller.
+     * This domain owns I/O pages that are within the range of the pfn_info
+     * array. Mappings occur at the priv of the caller.
      */
     dom_io = alloc_domain_struct();
     atomic_set(&dom_io->refcnt, 1);
     dom_io->id = DOMID_IO;
 
+    /* First 1MB of RAM is historically marked as I/O. */
+    for ( i = 0; i < 0x100; i++ )
+    {
+        page = &frame_table[i];
+        page->count_info        = PGC_allocated | 1;
+        page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
+        page_set_owner(page, dom_io);
+    }
+ 
+    /* Any non-RAM areas in the e820 map are considered to be for I/O. */
+    for ( i = 0; i < e820.nr_map; i++ )
+    {
+        if ( e820.map[i].type == E820_RAM )
+            continue;
+        pfn = e820.map[i].addr >> PAGE_SHIFT;
+        nr_pfns = (e820.map[i].size +
+                   (e820.map[i].addr & ~PAGE_MASK) +
+                   ~PAGE_MASK) >> PAGE_SHIFT;
+        for ( j = 0; j < nr_pfns; j++ )
+        {
+            if ( !pfn_valid(pfn+j) )
+                continue;
+            page = &frame_table[pfn+j];
+            page->count_info        = PGC_allocated | 1;
+            page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
+            page_set_owner(page, dom_io);
+        }
+    }
+
     subarch_init_memory(dom_xen);
 }
 
@@ -306,13 +338,7 @@
 {
     struct pfn_info *page = &frame_table[page_nr];
 
-    if ( unlikely(!pfn_is_ram(page_nr)) )
-    {
-        MEM_LOG("Pfn %p is not RAM", page_nr);
-        return 0;
-    }
-
-    if ( unlikely(!get_page(page, d)) )
+    if ( unlikely(!pfn_valid(page_nr)) || unlikely(!get_page(page, d)) )
     {
         MEM_LOG("Could not get page ref for pfn %p", page_nr);
         return 0;
@@ -419,20 +445,25 @@
         return 0;
     }
 
-    if ( unlikely(!pfn_is_ram(mfn)) )
+    if ( unlikely(!pfn_valid(mfn)) ||
+         unlikely(page_get_owner(page) == dom_io) )
     {
-        /* Revert to caller privileges if FD == DOMID_IO. */
+        /* DOMID_IO reverts to caller for privilege checks. */
         if ( d == dom_io )
             d = current->domain;
 
-        if ( IS_PRIV(d) )
-            return 1;
+        if ( (!IS_PRIV(d)) &&
+             (!IS_CAPABLE_PHYSDEV(d) || !domain_iomem_in_pfn(d, mfn)) )
+        {
+            MEM_LOG("Non-privileged attempt to map I/O space %08lx", mfn);
+            return 0;
+        }
 
-        if ( IS_CAPABLE_PHYSDEV(d) )
-            return domain_iomem_in_pfn(d, mfn);
+        /* No reference counting for out-of-range I/O pages. */
+        if ( !pfn_valid(mfn) )
+            return 1;
 
-        MEM_LOG("Non-privileged attempt to map I/O space %p", mfn);
-        return 0;
+        d = dom_io;
     }
 
     return ((l1v & _PAGE_RW) ?
@@ -529,7 +560,7 @@
     struct pfn_info *page = &frame_table[pfn];
     struct domain   *e;
 
-    if ( !(l1v & _PAGE_PRESENT) || !pfn_is_ram(pfn) )
+    if ( !(l1v & _PAGE_PRESENT) || !pfn_valid(pfn) )
         return;
 
     e = page_get_owner(page);
@@ -2851,7 +2882,7 @@
         gntref = (grant_ref_t)((val & 0xFF00) | ((ptr >> 2) & 0x00FF));
         
         if ( unlikely(IS_XEN_HEAP_FRAME(page)) ||
-             unlikely(!pfn_is_ram(pfn)) ||
+             unlikely(!pfn_valid(pfn)) ||
              unlikely((e = find_domain_by_id(domid)) == NULL) )
         {
             MEM_LOG("Bad frame (%p) or bad domid (%d).\n", pfn, domid);
diff -Nru a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     2005-04-14 19:03:29 -04:00
+++ b/xen/arch/x86/shadow.c     2005-04-14 19:03:29 -04:00
@@ -1727,7 +1727,7 @@
     struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
 
     ASSERT(spin_is_locked(&d->arch.shadow_lock));
-    ASSERT(pfn_is_ram(mfn));
+    ASSERT(pfn_valid(mfn));
     ASSERT((page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page);
 
     FSH_LOG("%s(gpfn=%p, mfn=%p) c=%p t=%p", __func__,
diff -Nru a/xen/common/grant_table.c b/xen/common/grant_table.c
--- a/xen/common/grant_table.c  2005-04-14 19:03:29 -04:00
+++ b/xen/common/grant_table.c  2005-04-14 19:03:29 -04:00
@@ -161,7 +161,7 @@
 
         frame = __gpfn_to_mfn_foreign(granting_d, sha->frame);
 
-        if ( unlikely(!pfn_is_ram(frame)) ||
+        if ( unlikely(!pfn_valid(frame)) ||
              unlikely(!((dev_hst_ro_flags & GNTMAP_readonly) ?
                         get_page(&frame_table[frame], granting_d) :
                         get_page_and_type(&frame_table[frame], granting_d,
diff -Nru a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h        2005-04-14 19:03:29 -04:00
+++ b/xen/include/asm-x86/page.h        2005-04-14 19:03:29 -04:00
@@ -35,14 +35,7 @@
 #define pfn_to_page(_pfn)   (frame_table + (_pfn))
 #define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
 #define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
-#define VALID_PAGE(page)    ((page - frame_table) < max_mapnr)
-
-/*
- * NB. We don't currently track I/O holes in the physical RAM space.
- * For now we guess that I/O devices will be mapped in the first 1MB
- * (e.g., VGA buffers) or beyond the end of physical RAM.
- */
-#define pfn_is_ram(_pfn)    (((_pfn) > 0x100) && ((_pfn) < max_page))
+#define pfn_valid(_pfn)     ((_pfn) < max_page)
 
 /* High table entries are reserved by the hypervisor. */
 #define DOMAIN_ENTRIES_PER_L2_PAGETABLE     \
diff -Nru a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      2005-04-14 19:03:29 -04:00
+++ b/xen/include/asm-x86/shadow.h      2005-04-14 19:03:29 -04:00
@@ -85,7 +85,7 @@
 
 static inline int mfn_is_page_table(unsigned long mfn)
 {
-    if ( !pfn_is_ram(mfn) )
+    if ( !pfn_valid(mfn) )
         return 0;
 
     return frame_table[mfn].count_info & PGC_page_table;
@@ -98,7 +98,7 @@
 
 static inline int mfn_out_of_sync(unsigned long mfn)
 {
-    if ( !pfn_is_ram(mfn) )
+    if ( !pfn_valid(mfn) )
         return 0;
 
     return frame_table[mfn].count_info & PGC_out_of_sync;
@@ -280,7 +280,7 @@
     if ( unlikely(!res) && IS_PRIV(d) && !shadow_mode_translate(d) &&
          !(l1_pgentry_val(nl1e) & L1_DISALLOW_MASK) &&
          (mfn = l1_pgentry_to_pfn(nl1e)) &&
-         pfn_is_ram(mfn) &&
+         pfn_valid(mfn) &&
          (owner = page_get_owner(pfn_to_page(l1_pgentry_to_pfn(nl1e)))) &&
          (d != owner) )
     {
@@ -426,7 +426,7 @@
 {
     u32 x, nx;
 
-    ASSERT(pfn_is_ram(smfn));
+    ASSERT(pfn_valid(smfn));
 
     x = frame_table[smfn].count_info;
     nx = x + 1;
@@ -455,7 +455,7 @@
 {
     u32 x, nx;
 
-    ASSERT(pfn_is_ram(smfn));
+    ASSERT(pfn_valid(smfn));
 
     x = frame_table[smfn].count_info;
     nx = x - 1;
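In summary, the removed pfn_is_ram() heuristic treated everything below 1MB or beyond max_page as I/O and could not describe non-RAM holes inside the e820 map; after this change pfn_valid() is a pure bounds check and I/O-ness is a property of page ownership (dom_io). A simplified, hedged restatement of the resulting mapping check, with the helper name check_io_mapping() invented here purely for illustration (the real logic lives inline in xen/arch/x86/mm.c above):

static int check_io_mapping(struct domain *d, unsigned long mfn)
{
    /* Out-of-range frames and frames owned by dom_io are I/O space. */
    if ( !pfn_valid(mfn) || (page_get_owner(&frame_table[mfn]) == dom_io) )
    {
        /* DOMID_IO defers privilege checks to the actual caller. */
        if ( d == dom_io )
            d = current->domain;

        if ( !IS_PRIV(d) &&
             (!IS_CAPABLE_PHYSDEV(d) || !domain_iomem_in_pfn(d, mfn)) )
            return 0;   /* unprivileged attempt to map I/O space: refuse */

        return 1;       /* allowed; out-of-range frames take no refcount */
    }

    return 1;           /* ordinary RAM follows the normal refcount path */
}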

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
