[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] xen: don't compile pv-specific parts if XEN_PV isn't configured



xenbus_client.c contains some functions specific to PV guests.
Enclose them in #ifdef CONFIG_XEN_PV to avoid compiling them when
they are not needed (e.g. on ARM).

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 drivers/xen/xenbus/xenbus_client.c | 130 +++++++++++++++++++------------------
 1 file changed, 67 insertions(+), 63 deletions(-)

diff --git a/drivers/xen/xenbus/xenbus_client.c 
b/drivers/xen/xenbus/xenbus_client.c
index 82a8866758ee..a1c17000129b 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -519,64 +519,6 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
        return err;
 }
 
-static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
-                                    grant_ref_t *gnt_refs,
-                                    unsigned int nr_grefs,
-                                    void **vaddr)
-{
-       struct xenbus_map_node *node;
-       struct vm_struct *area;
-       pte_t *ptes[XENBUS_MAX_RING_GRANTS];
-       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
-       int err = GNTST_okay;
-       int i;
-       bool leaked;
-
-       *vaddr = NULL;
-
-       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-               return -EINVAL;
-
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (!node)
-               return -ENOMEM;
-
-       area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
-       if (!area) {
-               kfree(node);
-               return -ENOMEM;
-       }
-
-       for (i = 0; i < nr_grefs; i++)
-               phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
-
-       err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
-                               phys_addrs,
-                               GNTMAP_host_map | GNTMAP_contains_pte,
-                               &leaked);
-       if (err)
-               goto failed;
-
-       node->nr_handles = nr_grefs;
-       node->pv.area = area;
-
-       spin_lock(&xenbus_valloc_lock);
-       list_add(&node->next, &xenbus_valloc_pages);
-       spin_unlock(&xenbus_valloc_lock);
-
-       *vaddr = area->addr;
-       return 0;
-
-failed:
-       if (!leaked)
-               free_vm_area(area);
-       else
-               pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
-
-       kfree(node);
-       return err;
-}
-
 struct map_ring_valloc_hvm
 {
        unsigned int idx;
@@ -725,6 +667,65 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, 
void *vaddr)
 }
 EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 
+#ifdef CONFIG_XEN_PV
+static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
+                                    grant_ref_t *gnt_refs,
+                                    unsigned int nr_grefs,
+                                    void **vaddr)
+{
+       struct xenbus_map_node *node;
+       struct vm_struct *area;
+       pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+       int err = GNTST_okay;
+       int i;
+       bool leaked;
+
+       *vaddr = NULL;
+
+       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+               return -EINVAL;
+
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (!node)
+               return -ENOMEM;
+
+       area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
+       if (!area) {
+               kfree(node);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < nr_grefs; i++)
+               phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
+
+       err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
+                               phys_addrs,
+                               GNTMAP_host_map | GNTMAP_contains_pte,
+                               &leaked);
+       if (err)
+               goto failed;
+
+       node->nr_handles = nr_grefs;
+       node->pv.area = area;
+
+       spin_lock(&xenbus_valloc_lock);
+       list_add(&node->next, &xenbus_valloc_pages);
+       spin_unlock(&xenbus_valloc_lock);
+
+       *vaddr = area->addr;
+       return 0;
+
+failed:
+       if (!leaked)
+               free_vm_area(area);
+       else
+               pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
+
+       kfree(node);
+       return err;
+}
+
 static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
 {
        struct xenbus_map_node *node;
@@ -788,6 +789,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device 
*dev, void *vaddr)
        return err;
 }
 
+static const struct xenbus_ring_ops ring_ops_pv = {
+       .map = xenbus_map_ring_valloc_pv,
+       .unmap = xenbus_unmap_ring_vfree_pv,
+};
+#endif
+
 struct unmap_ring_vfree_hvm
 {
        unsigned int idx;
@@ -916,11 +923,6 @@ enum xenbus_state xenbus_read_driver_state(const char 
*path)
 }
 EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
 
-static const struct xenbus_ring_ops ring_ops_pv = {
-       .map = xenbus_map_ring_valloc_pv,
-       .unmap = xenbus_unmap_ring_vfree_pv,
-};
-
 static const struct xenbus_ring_ops ring_ops_hvm = {
        .map = xenbus_map_ring_valloc_hvm,
        .unmap = xenbus_unmap_ring_vfree_hvm,
@@ -928,8 +930,10 @@ static const struct xenbus_ring_ops ring_ops_hvm = {
 
 void __init xenbus_ring_ops_init(void)
 {
+#ifdef CONFIG_XEN_PV
        if (!xen_feature(XENFEAT_auto_translated_physmap))
                ring_ops = &ring_ops_pv;
        else
+#endif
                ring_ops = &ring_ops_hvm;
 }
-- 
2.12.3


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.