Re: [Xen-devel] [PATCH 1/6] xenbus: Support HVM backends
On Wed, Dec 14, 2011 at 03:12:09PM -0500, Daniel De Graaf wrote:
> Add HVM implementations of xenbus_(map,unmap)_ring_v(alloc,free) so
> that ring mappings can be done without using GNTMAP_contains_pte which
> is not supported on HVM.
>
> Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
> ---
> drivers/xen/xenbus/xenbus_client.c | 155 +++++++++++++++++++++++++++++-------
> 1 files changed, 125 insertions(+), 30 deletions(-)
>
> diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
> index 1906125..ecd695d 100644
> --- a/drivers/xen/xenbus/xenbus_client.c
> +++ b/drivers/xen/xenbus/xenbus_client.c
> @@ -32,16 +32,27 @@
>
> #include <linux/slab.h>
> #include <linux/types.h>
> +#include <linux/spinlock.h>
> #include <linux/vmalloc.h>
> #include <linux/export.h>
> #include <asm/xen/hypervisor.h>
> #include <asm/xen/page.h>
> #include <xen/interface/xen.h>
> #include <xen/interface/event_channel.h>
> +#include <xen/balloon.h>
> #include <xen/events.h>
> #include <xen/grant_table.h>
> #include <xen/xenbus.h>
>
> +struct xenbus_map_node {
> + struct list_head next;
> + struct page *page;
> + grant_handle_t handle;
> +};
> +
> +static DEFINE_SPINLOCK(xenbus_valloc_lock);
> +static LIST_HEAD(xenbus_valloc_pages);
Could we use this for what the PV version of xenbus_unmap_ring_vfree
does (where it takes the vmlist_lock to look up the appropriate vaddr)?
Could the vmlist_lock lookup be removed so that this structure is used
for both PV and HVM? A rough sketch of what I mean is below.
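Something along these lines perhaps (rough and untested; the union and
the lookup helper are only there to illustrate the idea, and the PV map
path would have to queue a node too):

struct xenbus_map_node {
	struct list_head next;
	union {
		struct vm_struct *area;	/* PV: from alloc_vm_area() */
		struct page *page;	/* HVM: from alloc_xenballooned_pages() */
	};
	grant_handle_t handle;
};

/* Find and unlink the node that owns 'vaddr', or return NULL. */
static struct xenbus_map_node *xenbus_pop_map_node(void *vaddr)
{
	struct xenbus_map_node *node, *found = NULL;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		void *addr = xen_pv_domain() ? node->area->addr :
			pfn_to_kaddr(page_to_pfn(node->page));
		if (addr == vaddr) {
			list_del(&node->next);
			found = node;
			break;
		}
	}
	spin_unlock(&xenbus_valloc_lock);
	return found;
}

Then xenbus_unmap_ring_vfree_pv could drop the vmlist_lock walk and call
free_vm_area(node->area) once the grant is unmapped.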
> +
> const char *xenbus_strstate(enum xenbus_state state)
> {
> static const char *const name[] = {
> @@ -420,21 +431,8 @@ int xenbus_free_evtchn(struct xenbus_device *dev, int port)
> EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
>
>
> -/**
> - * xenbus_map_ring_valloc
> - * @dev: xenbus device
> - * @gnt_ref: grant reference
> - * @vaddr: pointer to address to be filled out by mapping
> - *
> - * Based on Rusty Russell's skeleton driver's map_page.
> - * Map a page of memory into this domain from another domain's grant table.
> - * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
> - * page to that address, and sets *vaddr to that address.
> - * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
> - * or -ENOMEM on error. If an error is returned, device will switch to
> - * XenbusStateClosing and the error message will be saved in XenStore.
> - */
> -int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
> +static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
> + int gnt_ref, void **vaddr)
> {
> struct gnttab_map_grant_ref op = {
> .flags = GNTMAP_host_map | GNTMAP_contains_pte,
> @@ -469,6 +467,64 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
> *vaddr = area->addr;
> return 0;
> }
> +
> +static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
> + int gnt_ref, void **vaddr)
> +{
> + struct xenbus_map_node *node;
> + int err;
> + void *addr;
> +
> + *vaddr = NULL;
> +
> + node = kzalloc(sizeof(*node), GFP_KERNEL);
> + if (!node)
> + return -ENOMEM;
> +
> + err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
> + if (err)
> + goto out_err;
> +
> + addr = pfn_to_kaddr(page_to_pfn(node->page));
> +
> + err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
> + if (err)
> + goto out_err;
> +
> + spin_lock(&xenbus_valloc_lock);
> + list_add(&node->next, &xenbus_valloc_pages);
> + spin_unlock(&xenbus_valloc_lock);
> +
> + *vaddr = addr;
> + return 0;
> +
> + out_err:
> + free_xenballooned_pages(1, &node->page);
> + kfree(node);
> + return err;
> +}
> +
> +/**
> + * xenbus_map_ring_valloc
> + * @dev: xenbus device
> + * @gnt_ref: grant reference
> + * @vaddr: pointer to address to be filled out by mapping
> + *
> + * Based on Rusty Russell's skeleton driver's map_page.
> + * Map a page of memory into this domain from another domain's grant table.
> + * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
> + * page to that address, and sets *vaddr to that address.
> + * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
> + * or -ENOMEM on error. If an error is returned, device will switch to
> + * XenbusStateClosing and the error message will be saved in XenStore.
> + */
> +int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
> +{
> + if (xen_pv_domain())
> + return xenbus_map_ring_valloc_pv(dev, gnt_ref, vaddr);
> + else
> + return xenbus_map_ring_valloc_hvm(dev, gnt_ref, vaddr);
This could be done in a way similar to how Annie's grant table v2 patches
do it. Meaning, define something like:

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, int gnt_ref, void **vaddr);
	...

and then define two variants of it (PV and HVM):

struct xenbus_ring_ops pv_ring_ops = {
	.map = xenbus_map_ring_valloc_pv,
	..
};

have a static to which either one is assigned:

static struct xenbus_ring_ops *ring_ops;

and set it in an init function:

void __init xenbus_ring_ops_init(void)
{
	if (xen_hvm_domain())
		ring_ops = &hvm_ring_ops;
	else
		...

Then xenbus_init() calls xenbus_ring_ops_init(), and
xenbus_map_ring_valloc() ends up just using the ring_ops->map call.
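Putting that together, the whole thing would look roughly like this
(untested sketch; the .unmap op is my extrapolation of the same idea,
reusing the helper names from this patch):

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, int gnt_ref, void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops pv_ring_ops = {
	.map = xenbus_map_ring_valloc_pv,
	.unmap = xenbus_unmap_ring_vfree_pv,
};

static const struct xenbus_ring_ops hvm_ring_ops = {
	.map = xenbus_map_ring_valloc_hvm,
	.unmap = xenbus_unmap_ring_vfree_hvm,
};

static const struct xenbus_ring_ops *ring_ops;

void __init xenbus_ring_ops_init(void)
{
	if (xen_hvm_domain())
		ring_ops = &hvm_ring_ops;
	else
		ring_ops = &pv_ring_ops;
}

int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	return ring_ops->map(dev, gnt_ref, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);

int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

with xenbus_init() calling xenbus_ring_ops_init() before any drivers can
map rings. That also makes the xen_pv_domain() check in the
xenbus_unmap_ring_vfree hunk further down unnecessary.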
> +}
> EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
>
>
> @@ -510,20 +566,7 @@ int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
> }
> EXPORT_SYMBOL_GPL(xenbus_map_ring);
>
> -
> -/**
> - * xenbus_unmap_ring_vfree
> - * @dev: xenbus device
> - * @vaddr: addr to unmap
> - *
> - * Based on Rusty Russell's skeleton driver's unmap_page.
> - * Unmap a page of memory in this domain that was imported from another domain.
> - * Use xenbus_unmap_ring_vfree if you mapped in your memory with
> - * xenbus_map_ring_valloc (it will free the virtual address space).
> - * Returns 0 on success and returns GNTST_* on error
> - * (see xen/include/interface/grant_table.h).
> - */
> -int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
> +static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
> {
> struct vm_struct *area;
> struct gnttab_unmap_grant_ref op = {
> @@ -566,8 +609,60 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
>
> return op.status;
> }
> -EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
>
> +static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
> +{
> + int rv;
> + struct xenbus_map_node *node;
> + void *addr;
> +
> + spin_lock(&xenbus_valloc_lock);
> + list_for_each_entry(node, &xenbus_valloc_pages, next) {
> + addr = pfn_to_kaddr(page_to_pfn(node->page));
> + if (addr == vaddr) {
> + list_del(&node->next);
> + goto found;
> + }
> + }
> + node = NULL;
> + found:
> + spin_unlock(&xenbus_valloc_lock);
> +
> + if (!node) {
> + xenbus_dev_error(dev, -ENOENT,
> + "can't find mapped virtual address %p", vaddr);
> + return -ENOENT;
> + }
> +
> + rv = xenbus_unmap_ring(dev, node->handle, addr);
> +
> + if (!rv)
> + free_xenballooned_pages(1, &node->page);
else
	WARN(1, "Leaking %p\n", vaddr);
> +
> + kfree(node);
> + return rv;
> +}
> +
> +/**
> + * xenbus_unmap_ring_vfree
> + * @dev: xenbus device
> + * @vaddr: addr to unmap
> + *
> + * Based on Rusty Russell's skeleton driver's unmap_page.
> + * Unmap a page of memory in this domain that was imported from another domain.
> + * Use xenbus_unmap_ring_vfree if you mapped in your memory with
> + * xenbus_map_ring_valloc (it will free the virtual address space).
> + * Returns 0 on success and returns GNTST_* on error
Well, that is not true anymore. You are also returning -ENOENT.
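Perhaps just say: "Returns 0 on success and an error code (GNTST_* or
-ENOENT) otherwise."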
> + * (see xen/include/interface/grant_table.h).
> + */
> +int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
> +{
> + if (xen_pv_domain())
> + return xenbus_unmap_ring_vfree_pv(dev, vaddr);
> + else
> + return xenbus_unmap_ring_vfree_hvm(dev, vaddr);
> +}
> +EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
>
> /**
> * xenbus_unmap_ring
> --
> 1.7.7.4
>
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel