
Re: [Xen-devel] [RFC PATCH 73/84] x86/mm: Move vmap_to_mfn() to mm.c and rename to virt_to_mfn_walk().



Hi,

On 9/26/19 10:46 AM, hongyax@xxxxxxxxxx wrote:
From: Hongyan Xia <hongyax@xxxxxxxxxx>

Obviously, vmap_to_mfn is a generic mechanism to walk the page table to
find the mfn, not vmap specific. Also the difference from virt_to_mfn()
is that it actually walks the page table instead of relying on a direct
map.
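(For contrast with the walking implementation in the hunk below, a rough illustrative sketch of the direct-map based lookup; the real definition lives in asm-x86/page.h and the name used here is hypothetical:)

/*
 * Direct-map flavour (sketch): derives the MFN by pure arithmetic on
 * the virtual address, so it is only valid for addresses the direct
 * map (or the Xen image mapping) actually covers.
 */
#define virt_to_mfn_directmap(va) \
    ((unsigned long)(virt_to_maddr(va) >> PAGE_SHIFT))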

vmap_to_mfn() is the abstraction used by common code. How it is implemented is arch-dependent, and therefore the name should stay the same when called from common code.

For x86, you are free to alias vmap_to_mfn() to virt_to_mfn_walk(). It would also be good to document in the code why a caller would select virt_to_mfn_walk() rather than virt_to_mfn().
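(A minimal sketch of what such an alias and documentation could look like; the exact placement, e.g. in asm-x86/mm.h, is an assumption:)

/*
 * Use virt_to_mfn_walk() when the VA may not be covered by the direct
 * map (e.g. vmap addresses); it walks the page tables instead of doing
 * direct-map arithmetic. virt_to_mfn() remains the cheap option for
 * direct-map addresses.
 */
unsigned long virt_to_mfn_walk(void *va);
struct page_info *virt_to_page_walk(void *va);

/* Keep the common-code abstraction name; alias it on x86. */
#define vmap_to_mfn(va)  _mfn(virt_to_mfn_walk(va))
#define vmap_to_page(va) virt_to_page_walk(va)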


Signed-off-by: Hongyan Xia <hongyax@xxxxxxxxxx>
---
  xen/arch/x86/mm.c        | 13 +++++++++++++
  xen/common/vmap.c        | 15 +--------------
  xen/include/asm-x86/mm.h |  2 ++
  xen/include/xen/vmap.h   |  3 ---
  4 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index f30b5b3951..ab760ecc1f 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5056,6 +5056,19 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
      return pl1e;
  }
+unsigned long virt_to_mfn_walk(void *va)
+{
+    l1_pgentry_t *pl1e = virt_to_xen_l1e((unsigned long)(va));
+    unsigned long ret = l1e_get_pfn(*pl1e);
+    unmap_xen_pagetable(pl1e);
+    return ret;
+}
+
+struct page_info *virt_to_page_walk(void *va)
+{
+    return mfn_to_page(_mfn(virt_to_mfn_walk(va)));
+}
+
  /* Convert to from superpage-mapping flags for map_pages_to_xen(). */
  #define l1f_to_lNf(f) (((f) & _PAGE_PRESENT) ? ((f) |  _PAGE_PSE) : (f))
  #define lNf_to_l1f(f) (((f) & _PAGE_PRESENT) ? ((f) & ~_PAGE_PSE) : (f))
diff --git a/xen/common/vmap.c b/xen/common/vmap.c
index fcdb8495c8..4323c6811f 100644
--- a/xen/common/vmap.c
+++ b/xen/common/vmap.c
@@ -19,19 +19,6 @@ static unsigned int __read_mostly vm_end[VMAP_REGION_NR];
  /* lowest known clear bit in the bitmap */
  static unsigned int vm_low[VMAP_REGION_NR];
-mfn_t vmap_to_mfn(void *va)
-{
-    l1_pgentry_t *pl1e = virt_to_xen_l1e((unsigned long)(va));
-    mfn_t ret = _mfn(l1e_get_pfn(*pl1e));
-    unmap_xen_pagetable(pl1e);
-    return ret;
-}
-
-struct page_info *vmap_to_page(void *va)
-{
-    return mfn_to_page(vmap_to_mfn(va));
-}
-

Please avoid adding code in one patch only to move it in a later one. Instead, you should put the code in the correct place from the beginning.

  void __init vm_init_type(enum vmap_region type, void *start, void *end)
  {
      unsigned int i, nr;
@@ -332,7 +319,7 @@ void vfree(void *va)
for ( i = 0; i < pages; i++ )
      {
-        struct page_info *page = vmap_to_page(va + i * PAGE_SIZE);
+        struct page_info *page = virt_to_page_walk(va + i * PAGE_SIZE);
ASSERT(page);
          page_list_add(page, &pg_list);
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index a4b3c9b7af..76ba56bdc3 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -643,6 +643,8 @@ void free_xen_pagetable(mfn_t mfn);
      } while (0)
l1_pgentry_t *virt_to_xen_l1e(unsigned long v);
+unsigned long virt_to_mfn_walk(void *va);
+struct page_info *virt_to_page_walk(void *va);
DECLARE_PER_CPU(mfn_t, root_pgt_mfn);
diff --git a/xen/include/xen/vmap.h b/xen/include/xen/vmap.h
index 3d69727a9d..369560e620 100644
--- a/xen/include/xen/vmap.h
+++ b/xen/include/xen/vmap.h
@@ -23,9 +23,6 @@ void *vmalloc_xen(size_t size);
  void *vzalloc(size_t size);
  void vfree(void *va);
-mfn_t vmap_to_mfn(void *va);
-struct page_info *vmap_to_page(void *va);
-
  void __iomem *ioremap(paddr_t, size_t);
static inline void iounmap(void __iomem *va)


Cheers,

--
Julien Grall
