
[Xen-devel] [PATCH v2 3/6] xen/arm: implement page reference and grant table functions needed by grant_table.c



The implementations are strongly "inspired" by their x86 counterparts,
except that we assume paging_mode_external and paging_mode_translate.

TODO: read-only mappings and gnttab_mark_dirty.
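
For reference, a rough usage sketch (not part of this patch) of the
contract the common grant table code expects from these hooks. The
wrapper name map_one_grant_frame() is made up for illustration;
get_page(), put_page(), create_grant_host_mapping() and the GNTMAP_*
and GNTST_* values are the real interfaces implemented below:

    /*
     * Illustrative only: take a reference on the granted frame and map
     * it into the current guest's physmap, dropping the reference again
     * if the mapping fails.  map_one_grant_frame() is a hypothetical
     * caller, not an existing function.
     */
    static int map_one_grant_frame(struct domain *owner, unsigned long frame,
                                   unsigned long guest_addr)
    {
        struct page_info *page = mfn_to_page(frame);
        int rc;

        /* Take a reference, checking that 'owner' really owns the page. */
        if ( !get_page(page, owner) )
            return GNTST_bad_page;

        /* Writable mapping only: read-only support is still a TODO. */
        rc = create_grant_host_mapping(guest_addr, frame, GNTMAP_host_map, 0);
        if ( rc != GNTST_okay )
            put_page(page);    /* undo the reference on failure */

        return rc;
    }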


Changes in v2:

- create_grant_host_mapping returns an error for read-only mappings;
- remove get_page_light reference.


Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 xen/arch/arm/dummy.S |    9 ----
 xen/arch/arm/mm.c    |  110 ++++++++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/p2m.c   |   77 ++++++++++++++++++++++++-----------
 3 files changed, 163 insertions(+), 33 deletions(-)

diff --git a/xen/arch/arm/dummy.S b/xen/arch/arm/dummy.S
index cab9522..baced25 100644
--- a/xen/arch/arm/dummy.S
+++ b/xen/arch/arm/dummy.S
@@ -23,18 +23,10 @@ DUMMY(arch_vcpu_reset);
 NOP(update_vcpu_system_time);
 
 /* Page Reference & Type Maintenance */
-DUMMY(get_page);
 DUMMY(get_page_type);
-DUMMY(page_get_owner_and_reference);
-DUMMY(put_page);
 DUMMY(put_page_type);
 
 /* Grant Tables */
-DUMMY(create_grant_host_mapping);
-DUMMY(gnttab_clear_flag);
-DUMMY(gnttab_mark_dirty);
-DUMMY(is_iomem_page);
-DUMMY(replace_grant_host_mapping);
 DUMMY(steal_page);
 
 /* Page Offlining */
@@ -45,7 +37,6 @@ DUMMY(domain_get_maximum_gpfn);
 DUMMY(domain_relinquish_resources);
 DUMMY(domain_set_time_offset);
 DUMMY(dom_cow);
-DUMMY(gmfn_to_mfn);
 DUMMY(hypercall_create_continuation);
 DUMMY(send_timer_event);
 DUMMY(share_xen_page_with_privileged_guests);
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 1832e7f..01a6781 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -555,6 +555,116 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
 
     return 0;
 }
+
+struct domain *page_get_owner_and_reference(struct page_info *page)
+{
+    unsigned long x, y = page->count_info;
+
+    do {
+        x = y;
+        /*
+         * Count ==  0: Page is not allocated, so we cannot take a reference.
+         * Count == -1: Reference count would wrap, which is invalid. 
+         */
+        if ( unlikely(((x + 1) & PGC_count_mask) <= 1) )
+            return NULL;
+    }
+    while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x );
+
+    return page_get_owner(page);
+}
+
+void put_page(struct page_info *page)
+{
+    unsigned long nx, x, y = page->count_info;
+
+    do {
+        ASSERT((y & PGC_count_mask) != 0);
+        x  = y;
+        nx = x - 1;
+    }
+    while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
+
+    if ( unlikely((nx & PGC_count_mask) == 0) )
+    {
+        free_domheap_page(page);
+    }
+}
+
+int get_page(struct page_info *page, struct domain *domain)
+{
+    struct domain *owner = page_get_owner_and_reference(page);
+
+    if ( likely(owner == domain) )
+        return 1;
+
+    if ( owner != NULL )
+        put_page(page);
+
+    return 0;
+}
+
+void gnttab_clear_flag(unsigned long nr, uint16_t *addr)
+{
+    /*
+     * Note that this cannot be clear_bit(), as the access must be
+     * confined to the specified 2 bytes.
+     */
+    uint16_t mask = ~(1 << nr), old;
+
+    do {
+        old = *addr;
+    } while ( cmpxchg(addr, old, old & mask) != old );
+}
+
+void gnttab_mark_dirty(struct domain *d, unsigned long l)
+{
+    /* XXX: mark dirty */
+}
+
+int create_grant_host_mapping(unsigned long addr, unsigned long frame, 
+                              unsigned int flags, unsigned int cache_flags)
+{
+    int rc;
+
+    if ( cache_flags || (flags & ~GNTMAP_readonly) != GNTMAP_host_map )
+        return GNTST_general_error;
+
+    /* XXX: read only mappings */
+    if ( flags & GNTMAP_readonly )
+    {
+        printk("%s: read only mappings not implemented yet\n", __func__);
+        return GNTST_general_error;
+    }
+
+    rc = guest_physmap_add_page(current->domain,
+                                 addr >> PAGE_SHIFT, frame, 0);
+    if ( rc )
+        return GNTST_general_error;
+    else
+        return GNTST_okay;
+}
+
+int replace_grant_host_mapping(unsigned long addr, unsigned long mfn,
+        unsigned long new_addr, unsigned int flags)
+{
+    unsigned long gfn = (unsigned long)(addr >> PAGE_SHIFT);
+    struct domain *d = current->domain;
+
+    if ( new_addr != 0 || (flags & GNTMAP_contains_pte) )
+        return GNTST_general_error;
+
+    guest_physmap_remove_page(d, gfn, mfn, 0);
+
+    return GNTST_okay;
+}
+
+int is_iomem_page(unsigned long mfn)
+{
+    if ( !mfn_valid(mfn) )
+        return 1;
+    return 0;
+}
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 6066aac..a4c7e6f 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -156,8 +156,14 @@ static int p2m_create_entry(struct domain *d,
     return 0;
 }
 
+enum p2m_operation {
+    INSERT,
+    ALLOCATE,
+    REMOVE
+};
+
 static int create_p2m_entries(struct domain *d,
-                     int alloc,
+                     enum p2m_operation op,
                      paddr_t start_gpaddr,
                      paddr_t end_gpaddr,
                      paddr_t maddr,
@@ -227,25 +233,39 @@ static int create_p2m_entries(struct domain *d,
         }
 
         /* Allocate a new RAM page and attach */
-        if (alloc)
-        {
-            struct page_info *page;
-            lpae_t pte;
-
-            rc = -ENOMEM;
-            page = alloc_domheap_page(d, 0);
-            if ( page == NULL ) {
-                printk("p2m_populate_ram: failed to allocate page\n");
-                goto out;
-            }
-
-            pte = mfn_to_p2m_entry(page_to_mfn(page), mattr);
-
-            write_pte(&third[third_table_offset(addr)], pte);
-        } else {
-            lpae_t pte = mfn_to_p2m_entry(maddr >> PAGE_SHIFT, mattr);
-            write_pte(&third[third_table_offset(addr)], pte);
-            maddr += PAGE_SIZE;
+        switch (op) {
+            case ALLOCATE:
+                {
+                    struct page_info *page;
+                    lpae_t pte;
+
+                    rc = -ENOMEM;
+                    page = alloc_domheap_page(d, 0);
+                    if ( page == NULL ) {
+                        printk("p2m_populate_ram: failed to allocate page\n");
+                        goto out;
+                    }
+
+                    pte = mfn_to_p2m_entry(page_to_mfn(page), mattr);
+
+                    write_pte(&third[third_table_offset(addr)], pte);
+                }
+                break;
+            case INSERT:
+                {
+                    lpae_t pte = mfn_to_p2m_entry(maddr >> PAGE_SHIFT, mattr);
+                    write_pte(&third[third_table_offset(addr)], pte);
+                    maddr += PAGE_SIZE;
+                }
+                break;
+            case REMOVE:
+                {
+                    lpae_t pte;
+                    memset(&pte, 0x00, sizeof(pte));
+                    write_pte(&third[third_table_offset(addr)], pte);
+                    maddr += PAGE_SIZE;
+                }
+                break;
         }
     }
 
@@ -265,7 +285,7 @@ int p2m_populate_ram(struct domain *d,
                      paddr_t start,
                      paddr_t end)
 {
-    return create_p2m_entries(d, 1, start, end, 0, MATTR_MEM);
+    return create_p2m_entries(d, ALLOCATE, start, end, 0, MATTR_MEM);
 }
 
 int map_mmio_regions(struct domain *d,
@@ -273,7 +293,7 @@ int map_mmio_regions(struct domain *d,
                      paddr_t end_gaddr,
                      paddr_t maddr)
 {
-    return create_p2m_entries(d, 0, start_gaddr, end_gaddr, maddr, MATTR_DEV);
+    return create_p2m_entries(d, INSERT, start_gaddr, end_gaddr, maddr, MATTR_DEV);
 }
 
 int guest_physmap_add_page(struct domain *d,
@@ -290,7 +310,7 @@ int guest_physmap_add_page(struct domain *d,
         once = 0;
     }
 
-    return create_p2m_entries(d, 0, gpfn << PAGE_SHIFT,
+    return create_p2m_entries(d, INSERT, gpfn << PAGE_SHIFT,
                               (gpfn + (1<<page_order)) << PAGE_SHIFT,
                               mfn << PAGE_SHIFT, MATTR_MEM);
 }
@@ -299,7 +319,9 @@ void guest_physmap_remove_page(struct domain *d,
                                unsigned long gpfn,
                                unsigned long mfn, unsigned int page_order)
 {
-    ASSERT(0);
+    create_p2m_entries(d, REMOVE, gpfn << PAGE_SHIFT,
+                       (gpfn + (1<<page_order)) << PAGE_SHIFT,
+                       mfn << PAGE_SHIFT, MATTR_MEM);
 }
 
 int p2m_alloc_table(struct domain *d)
@@ -348,6 +370,13 @@ int p2m_init(struct domain *d)
 
     return 0;
 }
+
+unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
+{
+    paddr_t p = p2m_lookup(d, gpfn << PAGE_SHIFT);
+    return p >> PAGE_SHIFT;
+}
+
 /*
  * Local variables:
  * mode: C
-- 
1.7.2.5

