xen-changelog

[Xen-changelog] [xen-unstable] Allow DMA address width to be overridden with boot parameters:

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Allow DMA address width to be overridden with boot parameters:
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 28 Nov 2006 22:10:15 +0000
Delivery-date: Tue, 28 Nov 2006 14:09:56 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 39e40ccf7df5ea2e4142ccbece57d56fa64d1b71
# Parent  d30be569532d4b8910b74e62b818fa99e319eefd
Allow DMA address width to be overridden with boot parameters:

In Xen: dma_bits=28 (for example)
In Linux: swiotlb_bits=28 (for example)

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c |   21 +++++++---
 xen/common/memory.c                             |    4 -
 xen/common/page_alloc.c                         |   49 +++++++++++++++++-------
 xen/include/asm-ia64/config.h                   |    4 -
 xen/include/asm-powerpc/config.h                |    2 +-
 xen/include/asm-x86/config.h                    |    4 -
 xen/include/xen/mm.h                            |    4 +
 7 files changed, 61 insertions(+), 27 deletions(-)
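
For illustration, this is how the two options might be combined in a GRUB menu.lst entry on a Xen system (the entry title, image paths and root= option are placeholders, not part of this changeset):

title Xen (28-bit DMA heap)
        kernel /boot/xen.gz dma_bits=28
        module /boot/vmlinuz-2.6-xen root=/dev/sda1 swiotlb_bits=28
        module /boot/initrd-2.6-xen.img

dma_bits is parsed by the custom_param() handler added to page_alloc.c; swiotlb_bits is picked up by the __setup() handler added to swiotlb.c, both shown in the hunks below.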

diff -r d30be569532d -r 39e40ccf7df5 linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c
--- a/linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c   Tue Nov 28 17:04:32 2006 +0000
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c   Tue Nov 28 17:15:22 2006 +0000
@@ -48,7 +48,7 @@ EXPORT_SYMBOL(swiotlb);
 #define IO_TLB_SHIFT 11
 
 /* Width of DMA addresses in the IO TLB. 30 bits is a b44 limitation. */
-#define IO_TLB_DMA_BITS 30
+#define DEFAULT_IO_TLB_DMA_BITS 30
 
 static int swiotlb_force;
 static char *iotlb_virt_start;
@@ -97,6 +97,15 @@ static struct phys_addr {
  * Protect the above data structures in the map and unmap calls
  */
 static DEFINE_SPINLOCK(io_tlb_lock);
+
+static unsigned int io_tlb_dma_bits = DEFAULT_IO_TLB_DMA_BITS;
+static int __init
+setup_io_tlb_bits(char *str)
+{
+       io_tlb_dma_bits = simple_strtoul(str, NULL, 0);
+       return 0;
+}
+__setup("swiotlb_bits=", setup_io_tlb_bits);
 
 static int __init
 setup_io_tlb_npages(char *str)
@@ -158,7 +167,7 @@ swiotlb_init_with_default_size (size_t d
                int rc = xen_create_contiguous_region(
                        (unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
                        get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
-                       IO_TLB_DMA_BITS);
+                       io_tlb_dma_bits);
                BUG_ON(rc);
        }
 
@@ -183,10 +192,12 @@ swiotlb_init_with_default_size (size_t d
 
        printk(KERN_INFO "Software IO TLB enabled: \n"
               " Aperture:     %lu megabytes\n"
-              " Kernel range: 0x%016lx - 0x%016lx\n",
+              " Kernel range: 0x%016lx - 0x%016lx\n"
+              " Address size: %u bits\n",
               bytes >> 20,
               (unsigned long)iotlb_virt_start,
-              (unsigned long)iotlb_virt_start + bytes);
+              (unsigned long)iotlb_virt_start + bytes,
+              io_tlb_dma_bits);
 }
 
 void
@@ -654,7 +665,7 @@ int
 int
 swiotlb_dma_supported (struct device *hwdev, u64 mask)
 {
-       return (mask >= ((1UL << IO_TLB_DMA_BITS) - 1));
+       return (mask >= ((1UL << io_tlb_dma_bits) - 1));
 }
 
 EXPORT_SYMBOL(swiotlb_init);
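
As a standalone sketch of the predicate swiotlb_dma_supported() now computes (plain C, not Xen code; the device mask values are illustrative): a device can use the IO TLB only if its DMA mask covers the highest address an io_tlb_dma_bits-wide aperture can produce.

#include <stdio.h>
#include <stdint.h>

/* Highest address representable in 'bits' bits (bits < 64 assumed,
 * matching the shift in the patch). */
static uint64_t max_addr(unsigned int bits)
{
        return ((uint64_t)1 << bits) - 1;
}

/* Mirrors the return expression in swiotlb_dma_supported(). */
static int dma_supported(uint64_t dev_mask, unsigned int io_tlb_dma_bits)
{
        return dev_mask >= max_addr(io_tlb_dma_bits);
}

int main(void)
{
        uint64_t b44_mask = (1ULL << 30) - 1; /* b44 NIC: 30-bit limit */

        /* 30-bit aperture fits under the mask: supported. */
        printf("%d\n", dma_supported(b44_mask, 30)); /* prints 1 */
        /* 31-bit aperture could hand out addresses above it: refused. */
        printf("%d\n", dma_supported(b44_mask, 31)); /* prints 0 */
        return 0;
}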
diff -r d30be569532d -r 39e40ccf7df5 xen/common/memory.c
--- a/xen/common/memory.c       Tue Nov 28 17:04:32 2006 +0000
+++ b/xen/common/memory.c       Tue Nov 28 17:15:22 2006 +0000
@@ -328,7 +328,7 @@ static long memory_exchange(XEN_GUEST_HA
          (exch.out.address_bits <
           (get_order_from_pages(max_page) + PAGE_SHIFT)) )
     {
-        if ( exch.out.address_bits < MAX_DMADOM_BITS )
+        if ( exch.out.address_bits < dma_bitsize )
         {
             rc = -ENOMEM;
             goto fail_early;
@@ -541,7 +541,7 @@ long do_memory_op(unsigned long cmd, XEN
              (reservation.address_bits <
               (get_order_from_pages(max_page) + PAGE_SHIFT)) )
         {
-            if ( reservation.address_bits < MAX_DMADOM_BITS )
+            if ( reservation.address_bits < dma_bitsize )
                 return start_extent;
             args.memflags = MEMF_dma;
         }
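
A minimal paraphrase of the rule both hunks above apply (plain C; the function name is invented for this sketch, and the outer comparison against the machine's total memory span is elided): a request restricted to fewer address bits than the DMA heap is wide can never be satisfied, while one at or above that width is steered to the DMA zone.

/* Reached only for requests carrying a nonzero address_bits
 * restriction narrower than the span of physical memory. */
static int classify(unsigned int address_bits, unsigned int dma_bitsize)
{
        if (address_bits < dma_bitsize)
                return -1;      /* unsatisfiable: the -ENOMEM path */
        return 1;               /* satisfiable: request gets MEMF_dma */
}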
diff -r d30be569532d -r 39e40ccf7df5 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Tue Nov 28 17:04:32 2006 +0000
+++ b/xen/common/page_alloc.c   Tue Nov 28 17:15:22 2006 +0000
@@ -46,18 +46,39 @@ string_param("badpage", opt_badpage);
 string_param("badpage", opt_badpage);
 
 /*
+ * Bit width of the DMA heap.
+ */
+unsigned int  dma_bitsize = CONFIG_DMA_BITSIZE;
+unsigned long max_dma_mfn = (1UL << (CONFIG_DMA_BITSIZE - PAGE_SHIFT)) - 1;
+static void parse_dma_bits(char *s)
+{
+    unsigned int v = simple_strtol(s, NULL, 0);
+    if ( v >= (sizeof(long)*8 + PAGE_SHIFT) )
+    {
+        dma_bitsize = sizeof(long)*8 + PAGE_SHIFT;
+        max_dma_mfn = ~0UL;
+    }
+    else
+    {
+        dma_bitsize = v;
+        max_dma_mfn = (1UL << (dma_bitsize - PAGE_SHIFT)) - 1;
+    }
+}
+custom_param("dma_bits", parse_dma_bits);
+
+/*
  * Amount of memory to reserve in a low-memory (<4GB) pool for specific
  * allocation requests. Ordinary requests will not fall back to the
  * lowmem emergency pool.
  */
-static unsigned long lowmem_emergency_pool_pages;
-static void parse_lowmem_emergency_pool(char *s)
+static unsigned long dma_emergency_pool_pages;
+static void parse_dma_emergency_pool(char *s)
 {
     unsigned long long bytes;
     bytes = parse_size_and_unit(s, NULL);
-    lowmem_emergency_pool_pages = bytes >> PAGE_SHIFT;
-}
-custom_param("lowmem_emergency_pool", parse_lowmem_emergency_pool);
+    dma_emergency_pool_pages = bytes >> PAGE_SHIFT;
+}
+custom_param("dma_emergency_pool", parse_dma_emergency_pool);
 
 #define round_pgdown(_p)  ((_p)&PAGE_MASK)
 #define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
@@ -248,7 +269,7 @@ unsigned long alloc_boot_pages(unsigned 
 #define NR_ZONES    3
 
 #define pfn_dom_zone_type(_pfn)                                 \
-    (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM)
+    (((_pfn) <= max_dma_mfn) ? MEMZONE_DMADOM : MEMZONE_DOM)
 
 static struct list_head heap[NR_ZONES][MAX_NUMNODES][MAX_ORDER+1];
 
@@ -278,6 +299,8 @@ void end_boot_allocator(void)
         if ( curr_free )
             init_heap_pages(pfn_dom_zone_type(i), mfn_to_page(i), 1);
     }
+
+    printk("Domain heap initialised: DMA width %u bits\n", dma_bitsize);
 }
 
 /* 
@@ -575,13 +598,13 @@ void init_domheap_pages(paddr_t ps, padd
     s_tot = round_pgup(ps) >> PAGE_SHIFT;
     e_tot = round_pgdown(pe) >> PAGE_SHIFT;
 
-    s_dma = min(s_tot, MAX_DMADOM_PFN + 1);
-    e_dma = min(e_tot, MAX_DMADOM_PFN + 1);
+    s_dma = min(s_tot, max_dma_mfn + 1);
+    e_dma = min(e_tot, max_dma_mfn + 1);
     if ( s_dma < e_dma )
         init_heap_pages(MEMZONE_DMADOM, mfn_to_page(s_dma), e_dma - s_dma);
 
-    s_nrm = max(s_tot, MAX_DMADOM_PFN + 1);
-    e_nrm = max(e_tot, MAX_DMADOM_PFN + 1);
+    s_nrm = max(s_tot, max_dma_mfn + 1);
+    e_nrm = max(e_tot, max_dma_mfn + 1);
     if ( s_nrm < e_nrm )
         init_heap_pages(MEMZONE_DOM, mfn_to_page(s_nrm), e_nrm - s_nrm);
 }
@@ -655,7 +678,7 @@ struct page_info *__alloc_domheap_pages(
         if ( unlikely(pg == NULL) &&
              ((order > MAX_ORDER) ||
               (avail_heap_pages(MEMZONE_DMADOM,-1) <
-               (lowmem_emergency_pool_pages + (1UL << order)))) )
+               (dma_emergency_pool_pages + (1UL << order)))) )
             return NULL;
     }
 
@@ -799,8 +822,8 @@ unsigned long avail_domheap_pages(void)
     avail_nrm = avail_heap_pages(MEMZONE_DOM,-1);
 
     avail_dma = avail_heap_pages(MEMZONE_DMADOM,-1);
-    if ( avail_dma > lowmem_emergency_pool_pages )
-        avail_dma -= lowmem_emergency_pool_pages;
+    if ( avail_dma > dma_emergency_pool_pages )
+        avail_dma -= dma_emergency_pool_pages;
     else
         avail_dma = 0;
 
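
The clamping in parse_dma_bits() can be exercised outside Xen with a small standalone program (plain C; PAGE_SHIFT is hard-coded to the x86 value of 12 here as an assumption):

#include <stdio.h>

#define PAGE_SHIFT 12 /* assumption: 4 KiB pages */

static unsigned int dma_bitsize;
static unsigned long max_dma_mfn;

static void parse_dma_bits(unsigned int v)
{
        /* Cap v so that (1UL << (v - PAGE_SHIFT)) cannot overflow. */
        if (v >= sizeof(long) * 8 + PAGE_SHIFT) {
                dma_bitsize = sizeof(long) * 8 + PAGE_SHIFT;
                max_dma_mfn = ~0UL; /* every frame counts as DMA-able */
        } else {
                dma_bitsize = v;
                max_dma_mfn = (1UL << (dma_bitsize - PAGE_SHIFT)) - 1;
        }
}

int main(void)
{
        parse_dma_bits(28); /* as with the boot option dma_bits=28 */
        printf("dma_bitsize=%u max_dma_mfn=%#lx\n",
               dma_bitsize, max_dma_mfn);
        return 0;
}

With dma_bits=28 this prints max_dma_mfn=0xffff: frames at or below MFN 0xffff feed MEMZONE_DMADOM and everything above goes to MEMZONE_DOM, per pfn_dom_zone_type() above.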
diff -r d30be569532d -r 39e40ccf7df5 xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h     Tue Nov 28 17:04:32 2006 +0000
+++ b/xen/include/asm-ia64/config.h     Tue Nov 28 17:15:22 2006 +0000
@@ -41,9 +41,7 @@
 #define CONFIG_IOSAPIC
 #define supervisor_mode_kernel (0)
 
-#define MAX_DMADOM_BITS 30
-#define MAX_DMADOM_MASK ((1UL << MAX_DMADOM_BITS) - 1)
-#define MAX_DMADOM_PFN  (MAX_DMADOM_MASK >> PAGE_SHIFT)
+#define CONFIG_DMA_BITSIZE 30
 
 /* If PERFC is used, include privop maps.  */
 #ifdef PERF_COUNTERS
diff -r d30be569532d -r 39e40ccf7df5 xen/include/asm-powerpc/config.h
--- a/xen/include/asm-powerpc/config.h  Tue Nov 28 17:04:32 2006 +0000
+++ b/xen/include/asm-powerpc/config.h  Tue Nov 28 17:15:22 2006 +0000
@@ -70,7 +70,7 @@ extern char __bss_start[];
 
 #define supervisor_mode_kernel (0)
 
-#define MAX_DMADOM_PFN (~0UL)
+#define CONFIG_DMA_BITSIZE 64
 
 #include <asm/powerpc64/config.h>
 
diff -r d30be569532d -r 39e40ccf7df5 xen/include/asm-x86/config.h
--- a/xen/include/asm-x86/config.h      Tue Nov 28 17:04:32 2006 +0000
+++ b/xen/include/asm-x86/config.h      Tue Nov 28 17:15:22 2006 +0000
@@ -82,9 +82,7 @@
 /* Debug stack is restricted to 8kB by guard pages. */
 #define DEBUG_STACK_SIZE 8192
 
-#define MAX_DMADOM_BITS 30
-#define MAX_DMADOM_MASK ((1UL << MAX_DMADOM_BITS) - 1)
-#define MAX_DMADOM_PFN  (MAX_DMADOM_MASK >> PAGE_SHIFT)
+#define CONFIG_DMA_BITSIZE 30
 
 #ifndef __ASSEMBLY__
 extern unsigned long _end; /* standard ELF symbol */
diff -r d30be569532d -r 39e40ccf7df5 xen/include/xen/mm.h
--- a/xen/include/xen/mm.h      Tue Nov 28 17:04:32 2006 +0000
+++ b/xen/include/xen/mm.h      Tue Nov 28 17:15:22 2006 +0000
@@ -89,6 +89,10 @@ int assign_pages(
 #define MAX_ORDER 20 /* 2^20 contiguous pages */
 #endif
 
+/* DMA heap parameters. */
+extern unsigned int  dma_bitsize;
+extern unsigned long max_dma_mfn;
+
 /* Automatic page scrubbing for dead domains. */
 extern struct list_head page_scrub_list;
 #define page_scrub_schedule_work()              \
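
Taken together, booting with dma_bits=28 (Xen) and swiotlb_bits=28 (dom0) would make the messages added by this patch read roughly as follows; the aperture size and kernel addresses are illustrative, not captured from a real boot:

(XEN) Domain heap initialised: DMA width 28 bits
...
Software IO TLB enabled:
 Aperture:     64 megabytes
 Kernel range: 0x00000000c8000000 - 0x00000000cc000000
 Address size: 28 bits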

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
