
Re: [Xen-devel] [v8][PATCH 08/17] hvmloader/mmio: reconcile guest mmio with reserved device memory



On 2014/12/2 17:11, Tian, Kevin wrote:
From: Chen, Tiejun
Sent: Monday, December 01, 2014 5:24 PM

We need to make sure no mmio allocation overlaps any rdm
(reserved device memory) range. Here we simply skip all
reserved device memory ranges in mmio space.

Signed-off-by: Tiejun Chen <tiejun.chen@xxxxxxxxx>
---
  tools/firmware/hvmloader/pci.c  | 54 ++++++++++++++++++++++++++++++++++++++++-
  tools/firmware/hvmloader/util.c |  9 +++++++
  tools/firmware/hvmloader/util.h |  2 ++
  3 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/tools/firmware/hvmloader/pci.c b/tools/firmware/hvmloader/pci.c
index 4e8d803..fc22ab3 100644
--- a/tools/firmware/hvmloader/pci.c
+++ b/tools/firmware/hvmloader/pci.c
@@ -38,6 +38,30 @@ uint64_t pci_hi_mem_start = 0, pci_hi_mem_end = 0;
  enum virtual_vga virtual_vga = VGA_none;
  unsigned long igd_opregion_pgbase = 0;

+static unsigned int need_skip_rmrr;
+extern struct xen_reserved_device_memory *rdm_map;
+
+static unsigned int
+check_reserved_device_memory_map(uint64_t mmio_base, uint64_t mmio_max)
+{
+    uint32_t i;
+    uint64_t rdm_start, rdm_end;
+    unsigned int nr_rdm_entries = hvm_get_reserved_device_memory_map();
+
+    for ( i = 0; i < nr_rdm_entries; i++ )
+    {
+        rdm_start = (uint64_t)rdm_map[i].start_pfn << PAGE_SHIFT;
+        rdm_end = rdm_start + ((uint64_t)rdm_map[i].nr_pages << PAGE_SHIFT);
+        if ( check_rdm_hole_conflict(mmio_base, mmio_max - mmio_base,
+                                     rdm_start, rdm_end - rdm_start) )
+        {
+            need_skip_rmrr++;
+        }
+    }
+
+    return nr_rdm_entries;
+}
+
+

I don't understand the use of need_skip_rmrr here. What does the counter
actually mean? Also, the function is not well organized: usually the value
returned is the main purpose of a function, but it looks like this one
actually exists to update need_skip_rmrr. If that is its real purpose, it
would be better to rename the function and to move the nr_rdm_entries
lookup directly into the outer function.
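Something like the below (an untested sketch; the name
count_rdm_conflicts is only illustrative) would make the return value
match the function's purpose:

static unsigned int
count_rdm_conflicts(uint64_t mmio_base, uint64_t mmio_max,
                    unsigned int nr_rdm_entries)
{
    unsigned int i, conflicts = 0;

    for ( i = 0; i < nr_rdm_entries; i++ )
    {
        uint64_t rdm_start = (uint64_t)rdm_map[i].start_pfn << PAGE_SHIFT;
        uint64_t rdm_end = rdm_start +
                           ((uint64_t)rdm_map[i].nr_pages << PAGE_SHIFT);

        if ( check_rdm_hole_conflict(mmio_base, mmio_max - mmio_base,
                                     rdm_start, rdm_end - rdm_start) )
            conflicts++;
    }

    return conflicts;
}

/* ... and in pci_setup(): */
    nr_rdm_entries = hvm_get_reserved_device_memory_map();
    need_skip_rmrr += count_rdm_conflicts(mem_resource.base,
                                          mem_resource.max,
                                          nr_rdm_entries);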

See inline below.


  void pci_setup(void)
  {
      uint8_t is_64bar, using_64bar, bar64_relocate = 0;
@@ -59,8 +83,10 @@ void pci_setup(void)
          uint32_t bar_reg;
          uint64_t bar_sz;
      } *bars = (struct bars *)scratch_start;
-    unsigned int i, nr_bars = 0;
+    unsigned int i, j, nr_bars = 0;
      uint64_t mmio_hole_size = 0;
+    unsigned int nr_rdm_entries;
+    uint64_t rdm_start, rdm_end;

      const char *s;
      /*
@@ -338,6 +364,14 @@ void pci_setup(void)
      io_resource.base = 0xc000;
      io_resource.max = 0x10000;

+    /* Check low mmio range. */
+    nr_rdm_entries = check_reserved_device_memory_map(mem_resource.base,
+                                                      mem_resource.max);
+    /* Check high mmio range. */
+    if ( nr_rdm_entries )
+        nr_rdm_entries = check_reserved_device_memory_map(high_mem_resource.base,
+                                                          high_mem_resource.max);
+
      /* Assign iomem and ioport resources in descending order of size. */
      for ( i = 0; i < nr_bars; i++ )
      {
@@ -393,8 +427,26 @@ void pci_setup(void)
          }

          base = (resource->base  + bar_sz - 1) & ~(uint64_t)(bar_sz - 1);
+ reallocate_mmio:
          bar_data |= (uint32_t)base;
          bar_data_upper = (uint32_t)(base >> 32);
+
+        if ( need_skip_rmrr )
+        {
+            for ( j = 0; j < nr_rdm_entries; j++ )
+            {
+                rdm_start = (uint64_t)rdm_map[j].start_pfn << PAGE_SHIFT;
+                rdm_end = rdm_start + ((uint64_t)rdm_map[j].nr_pages << PAGE_SHIFT);
+                if ( check_rdm_hole_conflict(base, bar_sz,
+                                             rdm_start, rdm_end - rdm_start) )
+                {
+                    base = (rdm_end + bar_sz - 1) & ~(uint64_t)(bar_sz - 1);
+                    need_skip_rmrr--;
+                    goto reallocate_mmio;
+                }
+            }
+        }
+

Here is the point I don't understand: what's required here is just to
walk the RMRR entries for a given allocation and, if there is a conflict,
move the base. How does need_skip_rmrr help with that? And why do you
need the pre-check on the low/high regions earlier?

We may have multiple RMRR entries, but only some of them may overlap the mmio space. I use need_skip_rmrr to count those overlapping entries.

When we skip one RMRR entry while allocating a range for a PCI device, we shouldn't check that entry again, since at that point we have already moved past its range, so we decrement need_skip_rmrr. Once need_skip_rmrr reaches zero (including the case where it was zero from the start), we do nothing further.
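To illustrate with made-up numbers (a standalone sketch, compiled apart
from hvmloader; the addresses are hypothetical):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same interval-overlap test as check_rdm_hole_conflict() above. */
static int overlaps(uint64_t start, uint64_t size,
                    uint64_t rdm_start, uint64_t rdm_size)
{
    return !(start + size <= rdm_start || start >= rdm_start + rdm_size);
}

int main(void)
{
    /* Two hypothetical RMRRs inside the low mmio hole. */
    uint64_t rdm_start[2] = { 0xf1000000, 0xf4000000 };
    uint64_t rdm_size[2]  = {   0x100000,   0x200000 };
    unsigned int need_skip_rmrr = 2;  /* both overlap the hole */
    uint64_t bar_sz = 0x1000000;      /* one 16MB BAR */
    uint64_t base = 0xf0800000;
    unsigned int j;

 reallocate_mmio:
    if ( need_skip_rmrr )
        for ( j = 0; j < 2; j++ )
            if ( overlaps(base, bar_sz, rdm_start[j], rdm_size[j]) )
            {
                /* Bump the base past this RMRR, keeping BAR alignment,
                 * and stop re-checking the entry we just crossed. */
                base = (rdm_start[j] + rdm_size[j] + bar_sz - 1)
                       & ~(bar_sz - 1);
                need_skip_rmrr--;
                goto reallocate_mmio;
            }

    /* The first pass conflicts with the RMRR at 0xf1000000, so the BAR
     * moves from 0xf0800000 to 0xf2000000, which is then clear. */
    printf("BAR placed at %#" PRIx64 "\n", base);
    return 0;
}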

Thanks
Tiejun


          base += bar_sz;

          if ( (base < resource->base) || (base > resource->max) )
diff --git a/tools/firmware/hvmloader/util.c b/tools/firmware/hvmloader/util.c
index dd81fb6..8767897 100644
--- a/tools/firmware/hvmloader/util.c
+++ b/tools/firmware/hvmloader/util.c
@@ -887,6 +887,15 @@ unsigned int hvm_get_reserved_device_memory_map(void)
      return nr_entries;
  }

+int check_rdm_hole_conflict(uint64_t start, uint64_t size,
+                            uint64_t rdm_start, uint64_t rdm_size)
+{
+    if ( start + size <= rdm_start || start >= rdm_start + rdm_size )
+        return 0;
+    else
+        return 1;
+}
+
  /*
   * Local variables:
   * mode: C
diff --git a/tools/firmware/hvmloader/util.h b/tools/firmware/hvmloader/util.h
index e4f1851..9b02f95 100644
--- a/tools/firmware/hvmloader/util.h
+++ b/tools/firmware/hvmloader/util.h
@@ -242,6 +242,8 @@ int build_e820_table(struct e820entry *e820,
  void dump_e820_table(struct e820entry *e820, unsigned int nr);

  unsigned int hvm_get_reserved_device_memory_map(void);
+int check_rdm_hole_conflict(uint64_t start, uint64_t size,
+                            uint64_t rdm_start, uint64_t rdm_size);

  #ifndef NDEBUG
  void perform_tests(void);
--
1.9.1



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

