
[Xen-devel] [PATCH v2 02/21] xen/arm: make allocate_memory work for non 1:1 mapped guests



Extend allocate_memory to work for non-1:1 mapped domUs. Specifically,
memory allocated for a domU is mapped into the domU pseudo-physical
address space at the appropriate addresses according to the guest memory
map: GUEST_RAM0_BASE and GUEST_RAM1_BASE.

To do that, insert_11_bank has been extended to deal with non-dom0
mappings starting from GUEST_RAM0_BASE, and has been renamed to
insert_bank.
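
For illustration only, the standalone sketch below shows the placement rule
that the new non-1:1 (!map_11) path implements: place the next bank just
after the previous one, and fall back to GUEST_RAM1_BASE once RAM0 would
overflow. The next_bank_start helper and the GUEST_RAM* values here are
assumptions made for the example, not code from this patch; the real
constants live in Xen's public headers.

/*
 * Minimal standalone sketch (not part of the patch) of the bank
 * placement used for non-1:1 mapped guests.  The GUEST_RAM* values
 * below are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t paddr_t;

#define GUEST_RAM0_BASE  0x40000000ULL   /* assumed for illustration */
#define GUEST_RAM0_SIZE  0xc0000000ULL   /* assumed for illustration */
#define GUEST_RAM1_BASE  0x200000000ULL  /* assumed for illustration */
#define GUEST_RAM1_SIZE  0x200000000ULL  /* assumed for illustration */

struct membank { paddr_t start, size; };

/*
 * Hypothetical helper mirroring the !map_11 path of insert_bank():
 * return the guest address for a new bank of 'size' bytes, or 0 if the
 * allocation would exceed both RAM regions of the guest memory map.
 */
static paddr_t next_bank_start(const struct membank *banks, unsigned int nr,
                               paddr_t size)
{
    paddr_t start = GUEST_RAM0_BASE;

    if ( nr > 0 )
    {
        const struct membank *last = &banks[nr - 1];

        start = last->start + last->size;

        if ( last->start == GUEST_RAM0_BASE &&
             start + size > GUEST_RAM0_BASE + GUEST_RAM0_SIZE )
            start = GUEST_RAM1_BASE;
        else if ( last->start == GUEST_RAM1_BASE &&
                  start + size > GUEST_RAM1_BASE + GUEST_RAM1_SIZE )
            return 0;
    }

    return start;
}

int main(void)
{
    /* One existing bank that nearly fills RAM0. */
    struct membank banks[] = { { GUEST_RAM0_BASE, GUEST_RAM0_SIZE - 0x200000 } };

    /* A 4MB allocation no longer fits in RAM0, so it lands at GUEST_RAM1_BASE. */
    printf("next bank at %#llx\n",
           (unsigned long long)next_bank_start(banks, 1, 4ULL << 20));

    return 0;
}

In the patch itself the same rule is applied inside insert_bank() before
the guest_physmap_add_page() call, using the guest memory map constants
from the public headers.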

Signed-off-by: Stefano Stabellini <stefanos@xxxxxxxxxx>

---
Changes in v2:
- new patch
---
 xen/arch/arm/domain_build.c | 57 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 44 insertions(+), 13 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 182e3d5..2a6619a 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -97,27 +97,51 @@ static unsigned int get_allocation_size(paddr_t size)
  * Returns false if the memory would be below bank 0 or we have run
  * out of banks. In this case it will free the pages.
  */
-static bool insert_11_bank(struct domain *d,
-                           struct kernel_info *kinfo,
-                           struct page_info *pg,
-                           unsigned int order)
+static bool insert_bank(struct domain *d,
+                        struct kernel_info *kinfo,
+                        struct page_info *pg,
+                        unsigned int order,
+                        bool map_11)
 {
-    int res, i;
+    int res, i, nr_mem_banks = map_11 ? NR_MEM_BANKS : 2;
     mfn_t smfn;
     paddr_t start, size;
+    struct membank *bank;
 
     smfn = page_to_mfn(pg);
     start = mfn_to_maddr(smfn);
     size = pfn_to_paddr(1UL << order);
+    if ( !map_11 )
+    {
+        start = GUEST_RAM0_BASE;
+        if ( kinfo->mem.nr_banks > 0 )
+        {
+            for( i = 0; i < kinfo->mem.nr_banks; i++ )
+            {
+                bank = &kinfo->mem.bank[i];
+                start = bank->start + bank->size;
+            }
+            if ( bank->start == GUEST_RAM0_BASE &&
+                    start + size > (GUEST_RAM0_BASE + GUEST_RAM0_SIZE) )
+                start = GUEST_RAM1_BASE;
+            if ( bank->start == GUEST_RAM1_BASE &&
+                    start + size > (GUEST_RAM1_BASE + GUEST_RAM1_SIZE) )
+            {
+                D11PRINT("Allocation of domain memory exceeds max amount\n");
+                goto fail;
+            }
+        }
+    }
 
-    D11PRINT("Allocated %#"PRIpaddr"-%#"PRIpaddr" (%ldMB/%ldMB, order %d)\n",
+    D11PRINT("Allocated %#"PRIpaddr"-%#"PRIpaddr":%#"PRIpaddr"-%#"PRIpaddr" (%ldMB/%ldMB, order %d)\n",
+             mfn_to_maddr(smfn), mfn_to_maddr(smfn) + size,
              start, start + size,
              1UL << (order + PAGE_SHIFT - 20),
              /* Don't want format this as PRIpaddr (16 digit hex) */
              (unsigned long)(kinfo->unassigned_mem >> 20),
              order);
 
-    if ( kinfo->mem.nr_banks > 0 &&
+    if ( map_11 && kinfo->mem.nr_banks > 0 &&
          size < MB(128) &&
          start + size < kinfo->mem.bank[0].start )
     {
@@ -125,7 +149,7 @@ static bool insert_11_bank(struct domain *d,
         goto fail;
     }
 
-    res = guest_physmap_add_page(d, _gfn(mfn_x(smfn)), smfn, order);
+    res = guest_physmap_add_page(d, gaddr_to_gfn(start), smfn, order);
     if ( res )
         panic("Failed map pages to DOM0: %d", res);
 
@@ -141,7 +165,7 @@ static bool insert_11_bank(struct domain *d,
 
     for( i = 0; i < kinfo->mem.nr_banks; i++ )
     {
-        struct membank *bank = &kinfo->mem.bank[i];
+        bank = &kinfo->mem.bank[i];
 
         /* If possible merge new memory into the start of the bank */
         if ( bank->start == start+size )
@@ -164,7 +188,7 @@ static bool insert_11_bank(struct domain *d,
          * could have inserted the memory into/before we would already
          * have done so, so this must be the right place.
          */
-        if ( start + size < bank->start && kinfo->mem.nr_banks < NR_MEM_BANKS )
+        if ( start + size < bank->start && kinfo->mem.nr_banks < nr_mem_banks )
         {
             memmove(bank + 1, bank,
                     sizeof(*bank) * (kinfo->mem.nr_banks - i));
@@ -175,7 +199,7 @@ static bool insert_11_bank(struct domain *d,
         }
     }
 
-    if ( i == kinfo->mem.nr_banks && kinfo->mem.nr_banks < NR_MEM_BANKS )
+    if ( i == kinfo->mem.nr_banks && kinfo->mem.nr_banks < nr_mem_banks )
     {
         struct membank *bank = &kinfo->mem.bank[kinfo->mem.nr_banks];
 
@@ -253,6 +277,7 @@ static void allocate_memory(struct domain *d, struct kernel_info *kinfo)
     struct page_info *pg;
     unsigned int order = get_allocation_size(kinfo->unassigned_mem);
     int i;
+    bool hwdom = d->domain_id == 0;
 
     bool lowmem = true;
     unsigned int bits;
@@ -263,6 +288,12 @@ static void allocate_memory(struct domain *d, struct kernel_info *kinfo)
 
     kinfo->mem.nr_banks = 0;
 
+    if ( !hwdom )
+    {
+        lowmem = false;
+        goto got_bank0;
+    }
+
     /*
      * First try and allocate the largest thing we can as low as
      * possible to be bank 0.
@@ -274,7 +305,7 @@ static void allocate_memory(struct domain *d, struct kernel_info *kinfo)
             pg = alloc_domheap_pages(d, order, MEMF_bits(bits));
             if ( pg != NULL )
             {
-                if ( !insert_11_bank(d, kinfo, pg, order) )
+                if ( !insert_bank(d, kinfo, pg, order, hwdom) )
                     BUG(); /* Cannot fail for first bank */
 
                 goto got_bank0;
@@ -319,7 +350,7 @@ static void allocate_memory(struct domain *d, struct kernel_info *kinfo)
             break;
         }
 
-        if ( !insert_11_bank(d, kinfo, pg, order) )
+        if ( !insert_bank(d, kinfo, pg, order, hwdom) )
         {
             if ( kinfo->mem.nr_banks == NR_MEM_BANKS )
                 /* Nothing more we can do. */
-- 
1.9.1

