
[Xen-devel] [PATCH 3/4] usb: Introduce Xen pvUSB backend



Introduce the Xen pvUSB backend. With pvUSB it is possible for a Xen
domU to communicate with a USB device assigned to that domU. All
communication goes through the pvUSB backend in a driver domain
(usually Dom0), which is the owner of the physical device.
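
As an illustration of the XenStore layout the backend expects: the node
names below are the ones usbback_probe() and usbback_process_port()
read ("num-ports", "usb-ver" and "port/<n>" holding the physical bus id
of the assigned device); the path prefix and the values shown are only
an example, not mandated by this patch:

  /local/domain/0/backend/vusb/<domid>/0/num-ports = "2"
  /local/domain/0/backend/vusb/<domid>/0/usb-ver   = "2"
  /local/domain/0/backend/vusb/<domid>/0/port/1    = "1-2"
  /local/domain/0/backend/vusb/<domid>/0/port/2    = ""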

The code is taken from the pvUSB implementation in Xen done by Fujitsu
based on Linux kernel 2.6.18.

Changes from the original version are:
- port to upstream kernel
- put all code in just one source file
- move module to appropriate location in kernel tree
- adapt to Linux style guide
- allocate resources dynamically
- use threaded irq
- correct sequence of state changes when assigning a device

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 drivers/usb/Makefile          |    1 +
 drivers/usb/xen/Kconfig       |   10 +
 drivers/usb/xen/Makefile      |    1 +
 drivers/usb/xen/xen-usbback.c | 1845 +++++++++++++++++++++++++++++++++++++++++
 4 files changed, 1857 insertions(+)
 create mode 100644 drivers/usb/xen/xen-usbback.c

diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 2676ef6..41f7398 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -64,3 +64,4 @@ obj-$(CONFIG_USB_COMMON)      += common/
 obj-$(CONFIG_USBIP_CORE)       += usbip/
 
 obj-$(CONFIG_XEN_USB_FRONTEND) += xen/
+obj-$(CONFIG_XEN_USB_BACKEND)  += xen/
diff --git a/drivers/usb/xen/Kconfig b/drivers/usb/xen/Kconfig
index 5d995477..3414617 100644
--- a/drivers/usb/xen/Kconfig
+++ b/drivers/usb/xen/Kconfig
@@ -8,3 +8,13 @@ config XEN_USB_FRONTEND
          within another guest OS (usually Dom0).
          Only needed if the kernel is running in a Xen guest and generic
          access to a USB device is needed.
+
+config XEN_USB_BACKEND
+       tristate "Xen USB backend driver"
+       depends on XEN_BACKEND
+       default m
+       help
+         The USB backend driver allows the kernel to export its USB devices
+         to other guests via a high-performance shared-memory interface.
+         Only needed for systems running as a Xen driver domain (e.g. Dom0)
+         if guests need generic access to USB devices.
diff --git a/drivers/usb/xen/Makefile b/drivers/usb/xen/Makefile
index 4568c26..c1a571065 100644
--- a/drivers/usb/xen/Makefile
+++ b/drivers/usb/xen/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_XEN_USB_FRONTEND)         += xen-usbfront.o
+obj-$(CONFIG_XEN_USB_BACKEND)          += xen-usbback.o
diff --git a/drivers/usb/xen/xen-usbback.c b/drivers/usb/xen/xen-usbback.c
new file mode 100644
index 0000000..56a600e
--- /dev/null
+++ b/drivers/usb/xen/xen-usbback.c
@@ -0,0 +1,1845 @@
+/*
+ * xen-usbback.c
+ *
+ * Xen USB backend driver.
+ *
+ * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
+ * Author: Noboru Iwamatsu <n_iwamatsu@xxxxxxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * or, by your choice,
+ *
+ * When distributed separately from the Linux kernel or incorporated into
+ * other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+
+#include <linux/usb/ch11.h>
+
+#include <xen/xen.h>
+#include <xen/balloon.h>
+#include <xen/events.h>
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/page.h>
+
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/usbif.h>
+
+struct usbstub;
+
+#define USBBACK_BUS_ID_SIZE 20
+#define USB_DEV_ADDR_SIZE 128
+
+struct usbback_info {
+       domid_t domid;
+       unsigned handle;
+       int num_ports;
+       enum usb_spec_version usb_ver;
+
+       struct xenbus_device *xbdev;
+       struct list_head usbif_list;
+
+       unsigned irq;
+
+       struct usbif_urb_back_ring urb_ring;
+       struct usbif_conn_back_ring conn_ring;
+
+       spinlock_t urb_ring_lock;
+       spinlock_t conn_ring_lock;
+       atomic_t refcnt;
+       int is_connected;
+
+       int ring_error;
+
+       struct xenbus_watch backend_watch;
+
+       /* device address lookup table */
+       struct usbstub *addr_table[USB_DEV_ADDR_SIZE];
+       spinlock_t addr_lock;
+
+       /* connected device list */
+       struct list_head stub_list;
+       spinlock_t stub_lock;
+
+       /* deferred hotplug list, guarded by conn_ring_lock */
+       struct list_head hotplug_list;
+
+       /* wait for outstanding requests to complete on disconnect */
+       wait_queue_head_t waiting_to_free;
+};
+
+struct vusb_port_id {
+       struct list_head id_list;
+       struct list_head hotplug_list;
+       char phys_bus[USBBACK_BUS_ID_SIZE];
+       domid_t domid;
+       unsigned handle;
+       int portnum;
+       int speed;
+};
+
+struct usbstub {
+       struct kref kref;
+       struct list_head dev_list;
+
+       struct vusb_port_id *portid;
+       struct usb_device *udev;
+       struct usbback_info *usbif;
+       int addr;
+
+       struct list_head submitting_list;
+       spinlock_t submitting_lock;
+};
+
+struct pending_req_segment {
+       uint16_t offset;
+       uint16_t length;
+};
+
+struct pending_req {
+       struct usbback_info *usbif;
+
+       uint16_t id; /* request id */
+
+       struct usbstub *stub;
+       struct list_head urb_list;
+
+       /* urb */
+       struct urb *urb;
+       void *buffer;
+       dma_addr_t transfer_dma;
+       struct usb_ctrlrequest *setup;
+
+       /* request segments */
+       uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */
+       uint16_t nr_extra_segs; /* number of iso_frame_desc segments (ISO) */
+       struct pending_req_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST];
+       grant_handle_t grant_handles[USBIF_MAX_SEGMENTS_PER_REQUEST];
+       struct page *pages[USBIF_MAX_SEGMENTS_PER_REQUEST];
+};
+
+struct work_request_data {
+       int (*work_func)(struct usb_device *, struct work_request_data *);
+       union {
+               struct {
+                       int pipe;
+               } clear_halt;
+               struct {
+                       int interface;
+                       int alternate;
+               } set_interface;
+       };
+};
+
+struct work_request {
+       struct pending_req *pending_req;
+       struct work_request_data data;
+       struct work_struct work;
+};
+
+static int usbback_max_buffer_pages = 1024;
+module_param_named(max_buffer_pages, usbback_max_buffer_pages, int, 0644);
+MODULE_PARM_DESC(max_buffer_pages,
+"Maximum number of free pages to keep in backend buffer");
+
+static struct kmem_cache *usbback_cachep;
+static DEFINE_SPINLOCK(usbback_free_pages_lock);
+static int usbback_free_pages_num;
+static LIST_HEAD(usbback_free_pages);
+static LIST_HEAD(usbback_pending_urb_free);
+static DEFINE_SPINLOCK(usbback_urb_free_lock);
+static LIST_HEAD(usbback_port_list);
+static DEFINE_SPINLOCK(usbback_port_list_lock);
+static LIST_HEAD(usbback_usbif_list);
+static DEFINE_SPINLOCK(usbback_usbif_list_lock);
+
+#define USBBACK_INVALID_HANDLE (~0)
+
+static void usbback_get(struct usbback_info *info)
+{
+       atomic_inc(&info->refcnt);
+}
+
+static void usbback_put(struct usbback_info *info)
+{
+       if (atomic_dec_and_test(&info->refcnt))
+               wake_up(&info->waiting_to_free);
+}
+
+static void usbback_put_free_pages(struct page **page, int num)
+{
+       unsigned long flags;
+       int i = usbback_free_pages_num + num, n = num;
+
+       if (num == 0)
+               return;
+       if (i > usbback_max_buffer_pages) {
+               n = min(num, i - usbback_max_buffer_pages);
+               free_xenballooned_pages(n, page + num - n);
+               n = num - n;
+       }
+       spin_lock_irqsave(&usbback_free_pages_lock, flags);
+       for (i = 0; i < n; i++)
+               list_add(&page[i]->lru, &usbback_free_pages);
+       usbback_free_pages_num += n;
+       spin_unlock_irqrestore(&usbback_free_pages_lock, flags);
+}
+
+static int usbback_get_free_page(struct page **page)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&usbback_free_pages_lock, flags);
+       if (list_empty(&usbback_free_pages)) {
+               spin_unlock_irqrestore(&usbback_free_pages_lock, flags);
+               return alloc_xenballooned_pages(1, page, false);
+       }
+       page[0] = list_first_entry(&usbback_free_pages, struct page, lru);
+       list_del(&page[0]->lru);
+       usbback_free_pages_num--;
+       spin_unlock_irqrestore(&usbback_free_pages_lock, flags);
+       return 0;
+}
+
+static unsigned long usbback_vaddr(struct pending_req *req, int seg)
+{
+       unsigned long pfn = page_to_pfn(req->pages[seg]);
+
+       return (unsigned long)pfn_to_kaddr(pfn);
+}
+
+static void usbback_add_req_to_submitting_list(struct pending_req *pending_req)
+{
+       unsigned long flags;
+       struct usbstub *stub = pending_req->stub;
+
+       spin_lock_irqsave(&stub->submitting_lock, flags);
+       list_add_tail(&pending_req->urb_list, &stub->submitting_list);
+       spin_unlock_irqrestore(&stub->submitting_lock, flags);
+}
+
+static void usbback_rm_req_from_submitting_list(struct pending_req *pending_req)
+{
+       unsigned long flags;
+       struct usbstub *stub = pending_req->stub;
+
+       spin_lock_irqsave(&stub->submitting_lock, flags);
+       list_del_init(&pending_req->urb_list);
+       spin_unlock_irqrestore(&stub->submitting_lock, flags);
+}
+
+static void usbback_unlink_urbs(struct usbstub *stub)
+{
+       struct pending_req *req, *tmp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&stub->submitting_lock, flags);
+       list_for_each_entry_safe(req, tmp, &stub->submitting_list, urb_list) {
+               usb_unlink_urb(req->urb);
+       }
+       spin_unlock_irqrestore(&stub->submitting_lock, flags);
+}
+
+static void usbback_fast_flush_area(struct pending_req *pending_req)
+{
+       struct gnttab_unmap_grant_ref unmap[USBIF_MAX_SEGMENTS_PER_REQUEST];
+       struct page *pages[USBIF_MAX_SEGMENTS_PER_REQUEST];
+       unsigned i, nr_segs, invcount = 0;
+       grant_handle_t handle;
+       int ret;
+
+       nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;
+       if (!nr_segs)
+               return;
+
+       for (i = 0; i < nr_segs; i++) {
+               handle = pending_req->grant_handles[i];
+               if (handle == USBBACK_INVALID_HANDLE)
+                       continue;
+               gnttab_set_unmap_op(&unmap[invcount],
+                                   usbback_vaddr(pending_req, i),
+                                   GNTMAP_host_map, handle);
+               pages[invcount] = pending_req->pages[i];
+               invcount++;
+       }
+
+       ret = gnttab_unmap_refs(unmap, NULL, pages, invcount);
+       BUG_ON(ret);
+
+       usbback_put_free_pages(pending_req->pages, nr_segs);
+}
+
+static void usbback_copy_buff_to_pages(void *buf,
+                                      struct pending_req *pending_req,
+                                      int start, int nr_pages, unsigned offset,
+                                      unsigned length)
+{
+       int i;
+       struct pending_req_segment *seg;
+       unsigned buf_off = 0;
+       unsigned off, len;
+
+       seg = pending_req->seg + start;
+       for (i = start; i < start + nr_pages; i++) {
+               len = seg->length;
+               off = seg->offset;
+               if (buf_off + len > offset) {
+                       if (buf_off < offset) {
+                               len -= offset - buf_off;
+                               off += offset - buf_off;
+                               buf_off += offset - buf_off;
+                       }
+                       if (buf_off + len > offset + length)
+                               len -= offset + length - buf_off;
+                       memcpy((void *)usbback_vaddr(pending_req, i) + off,
+                              buf + buf_off, len);
+               }
+               buf_off += len;
+               if (buf_off >= offset + length)
+                       return;
+               seg++;
+       }
+}
+
+static void usbback_copy_pages_to_buff(void *buf,
+                                      struct pending_req *pending_req,
+                                      int start, int nr_pages)
+{
+       int i;
+       struct pending_req_segment *seg;
+
+       seg = pending_req->seg + start;
+       for (i = start; i < start + nr_pages; i++) {
+               memcpy(buf, (void *)usbback_vaddr(pending_req, i) + seg->offset,
+                      seg->length);
+               buf += seg->length;
+               seg++;
+       }
+}
+
+static int usbback_alloc_urb(struct usbif_urb_request *req,
+                            struct pending_req *pending_req)
+{
+       int ret;
+
+       if (usb_pipeisoc(req->pipe))
+               pending_req->urb = usb_alloc_urb(req->u.isoc.number_of_packets,
+                                                GFP_KERNEL);
+       else
+               pending_req->urb = usb_alloc_urb(0, GFP_KERNEL);
+       if (!pending_req->urb)
+               return -ENOMEM;
+
+       if (req->buffer_length) {
+               pending_req->buffer =
+                       usb_alloc_coherent(pending_req->stub->udev,
+                                          req->buffer_length, GFP_KERNEL,
+                                          &pending_req->transfer_dma);
+               if (!pending_req->buffer) {
+                       ret = -ENOMEM;
+                       goto fail_free_urb;
+               }
+       }
+
+       if (usb_pipecontrol(req->pipe)) {
+               pending_req->setup = kmalloc(sizeof(struct usb_ctrlrequest),
+                                            GFP_KERNEL);
+               if (!pending_req->setup) {
+                       ret = -ENOMEM;
+                       goto fail_free_buffer;
+               }
+       }
+
+       return 0;
+
+fail_free_buffer:
+       if (req->buffer_length)
+               usb_free_coherent(pending_req->stub->udev, req->buffer_length,
+                                 pending_req->buffer,
+                                 pending_req->transfer_dma);
+fail_free_urb:
+       usb_free_urb(pending_req->urb);
+       return ret;
+}
+
+static void usbback_free_urb(struct urb *urb)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&usbback_urb_free_lock, flags);
+       list_add(&urb->urb_list, &usbback_pending_urb_free);
+       spin_unlock_irqrestore(&usbback_urb_free_lock, flags);
+}
+
+static void _usbback_free_urb(struct urb *urb)
+{
+       if (usb_pipecontrol(urb->pipe))
+               kfree(urb->setup_packet);
+       if (urb->transfer_buffer_length)
+               usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+                                 urb->transfer_buffer, urb->transfer_dma);
+       usb_free_urb(urb);
+}
+
+static void usbback__free_urbs(void)
+{
+       unsigned long flags;
+       struct list_head tmp_list;
+       struct urb *next_urb;
+
+       if (list_empty(&usbback_pending_urb_free))
+               return;
+
+       INIT_LIST_HEAD(&tmp_list);
+
+       spin_lock_irqsave(&usbback_urb_free_lock, flags);
+       list_splice_init(&usbback_pending_urb_free, &tmp_list);
+       spin_unlock_irqrestore(&usbback_urb_free_lock, flags);
+
+       while (!list_empty(&tmp_list)) {
+               next_urb = list_first_entry(&tmp_list, struct urb, urb_list);
+               list_del(&next_urb->urb_list);
+               _usbback_free_urb(next_urb);
+       }
+}
+
+static void usbback_do_response(struct pending_req *pending_req,
+                               int32_t status, int32_t actual_length,
+                               int32_t error_count, uint16_t start_frame)
+{
+       struct usbback_info *usbif = pending_req->usbif;
+       struct usbif_urb_response *res;
+       unsigned long flags;
+       int notify;
+
+       spin_lock_irqsave(&usbif->urb_ring_lock, flags);
+       res = RING_GET_RESPONSE(&usbif->urb_ring, usbif->urb_ring.rsp_prod_pvt);
+       res->id = pending_req->id;
+       res->status = status;
+       res->actual_length = actual_length;
+       res->error_count = error_count;
+       res->start_frame = start_frame;
+       usbif->urb_ring.rsp_prod_pvt++;
+       barrier();
+       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&usbif->urb_ring, notify);
+       spin_unlock_irqrestore(&usbif->urb_ring_lock, flags);
+
+       if (notify)
+               notify_remote_via_irq(usbif->irq);
+}
+
+static void usbback_do_response_ret(struct pending_req *pending_req,
+                                   int32_t status)
+{
+       usbback_do_response(pending_req, status, 0, 0, 0);
+       usbback_put(pending_req->usbif);
+       kmem_cache_free(usbback_cachep, pending_req);
+}
+
+static void usbback_copy_isoc_to_pages(struct pending_req *pending_req)
+{
+       unsigned sz, n_isoc, i;
+       struct urb *urb = pending_req->urb;
+       struct usb_iso_packet_descriptor *isoc;
+
+       isoc = &urb->iso_frame_desc[0];
+       n_isoc = urb->number_of_packets;
+       sz = n_isoc * sizeof(struct usb_iso_packet_descriptor);
+
+       usbback_copy_buff_to_pages(isoc, pending_req,
+                                  pending_req->nr_buffer_segs,
+                                  pending_req->nr_extra_segs, 0, sz);
+
+       if (!usb_pipein(urb->pipe))
+               return;
+
+       for (i = 0; i < n_isoc; i++) {
+               usbback_copy_buff_to_pages(pending_req->buffer,
+                                          pending_req, 0,
+                                          pending_req->nr_buffer_segs,
+                                          isoc->offset, isoc->actual_length);
+               isoc++;
+       }
+}
+
+static void usbback_urb_complete(struct urb *urb)
+{
+       struct pending_req *pending_req = (struct pending_req *)urb->context;
+
+       if (usb_pipeisoc(urb->pipe))
+               usbback_copy_isoc_to_pages(pending_req);
+       else if (usb_pipein(urb->pipe) && urb->actual_length > 0)
+               usbback_copy_buff_to_pages(pending_req->buffer, pending_req,
+                                          0, pending_req->nr_buffer_segs,
+                                          0, urb->actual_length);
+
+       usbback_fast_flush_area(pending_req);
+
+       usbback_do_response(pending_req, urb->status, urb->actual_length,
+                           urb->error_count, urb->start_frame);
+
+       usbback_rm_req_from_submitting_list(pending_req);
+
+       usbback_free_urb(urb);
+       usbback_put(pending_req->usbif);
+       kmem_cache_free(usbback_cachep, pending_req);
+}
+
+static int usbback_gnttab_map(struct usbback_info *usbif,
+                             struct usbif_urb_request *req,
+                             struct pending_req *pending_req)
+{
+       int i, ret;
+       unsigned nr_segs;
+       uint32_t flags;
+       struct gnttab_map_grant_ref map[USBIF_MAX_SEGMENTS_PER_REQUEST];
+
+       nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;
+       if (!nr_segs)
+               return 0;
+
+       if (nr_segs > USBIF_MAX_SEGMENTS_PER_REQUEST) {
+               pr_err("xen-pvusb: Bad number of segments in request\n");
+               return -EINVAL;
+       }
+
+       if (pending_req->nr_buffer_segs) {
+               flags = GNTMAP_host_map;
+               if (usb_pipeout(req->pipe))
+                       flags |= GNTMAP_readonly;
+               for (i = 0; i < pending_req->nr_buffer_segs; i++) {
+                       if (usbback_get_free_page(pending_req->pages + i)) {
+                               usbback_put_free_pages(pending_req->pages, i);
+                               pr_err("xen-pvusb: no grant page\n");
+                               return -ENOMEM;
+                       }
+                       gnttab_set_map_op(&map[i],
+                                         usbback_vaddr(pending_req, i), flags,
+                                         req->seg[i].gref, usbif->domid);
+               }
+       }
+
+       if (pending_req->nr_extra_segs) {
+               flags = GNTMAP_host_map;
+               for (i = req->nr_buffer_segs; i < nr_segs; i++) {
+                       if (usbback_get_free_page(pending_req->pages + i)) {
+                               usbback_put_free_pages(pending_req->pages, i);
+                               pr_err("xen-pvusb: no grant page\n");
+                               return -ENOMEM;
+                       }
+                       gnttab_set_map_op(&map[i],
+                                         usbback_vaddr(pending_req, i), flags,
+                                         req->seg[i].gref, usbif->domid);
+               }
+       }
+
+       ret = gnttab_map_refs(map, NULL, pending_req->pages, nr_segs);
+       BUG_ON(ret);
+
+       for (i = 0; i < nr_segs; i++) {
+               if (unlikely(map[i].status != GNTST_okay)) {
+                       pr_err("xen-pvusb: invalid buffer, could not map it\n");
+                       map[i].handle = USBBACK_INVALID_HANDLE;
+                       ret = 1;
+               }
+
+               pending_req->grant_handles[i] = map[i].handle;
+
+               if (ret)
+                       continue;
+
+               pending_req->seg[i].offset = req->seg[i].offset;
+               pending_req->seg[i].length = req->seg[i].length;
+
+               if ((unsigned)pending_req->seg[i].offset +
+                   (unsigned)pending_req->seg[i].length > PAGE_SIZE)
+                       ret = 1;
+       }
+
+       if (ret) {
+               usbback_fast_flush_area(pending_req);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void usbback_init_urb(struct usbif_urb_request *req,
+                            struct pending_req *pending_req)
+{
+       unsigned pipe;
+       struct usb_device *udev = pending_req->stub->udev;
+       struct urb *urb = pending_req->urb;
+
+       switch (usb_pipetype(req->pipe)) {
+       case PIPE_ISOCHRONOUS:
+               pipe = usb_pipein(req->pipe) ?
+                       usb_rcvisocpipe(udev, usb_pipeendpoint(req->pipe)) :
+                       usb_sndisocpipe(udev, usb_pipeendpoint(req->pipe));
+               urb->dev = udev;
+               urb->pipe = pipe;
+               urb->transfer_flags = req->transfer_flags;
+               urb->transfer_flags |= URB_ISO_ASAP;
+               urb->transfer_buffer = pending_req->buffer;
+               urb->transfer_buffer_length = req->buffer_length;
+               urb->complete = usbback_urb_complete;
+               urb->context = pending_req;
+               urb->interval = req->u.isoc.interval;
+               urb->start_frame = req->u.isoc.start_frame;
+               urb->number_of_packets = req->u.isoc.number_of_packets;
+               break;
+
+       case PIPE_INTERRUPT:
+               pipe = usb_pipein(req->pipe) ?
+                       usb_rcvintpipe(udev, usb_pipeendpoint(req->pipe)) :
+                       usb_sndintpipe(udev, usb_pipeendpoint(req->pipe));
+               usb_fill_int_urb(urb, udev, pipe,
+                               pending_req->buffer, req->buffer_length,
+                               usbback_urb_complete,
+                               pending_req, req->u.intr.interval);
+               /*
+                * High speed interrupt endpoints use a logarithmic encoding
+                * of the endpoint interval, and usb_fill_int_urb()
+                * initializes an interrupt urb with that encoded interval
+                * value.
+                *
+                * req->u.intr.interval was already encoded by the frontend,
+                * so the usb_fill_int_urb() call above leaves urb->interval
+                * doubly encoded.
+                *
+                * Simply overwrite urb->interval with the original value.
+                */
+               urb->interval = req->u.intr.interval;
+               urb->transfer_flags = req->transfer_flags;
+               break;
+
+       case PIPE_CONTROL:
+               pipe = usb_pipein(req->pipe) ? usb_rcvctrlpipe(udev, 0) :
+                                              usb_sndctrlpipe(udev, 0);
+               usb_fill_control_urb(urb, udev, pipe,
+                               (unsigned char *)pending_req->setup,
+                               pending_req->buffer, req->buffer_length,
+                               usbback_urb_complete, pending_req);
+               memcpy(pending_req->setup, req->u.ctrl, 8);
+               urb->transfer_flags = req->transfer_flags;
+               break;
+
+       case PIPE_BULK:
+               pipe = usb_pipein(req->pipe) ?
+                       usb_rcvbulkpipe(udev, usb_pipeendpoint(req->pipe)) :
+                       usb_sndbulkpipe(udev, usb_pipeendpoint(req->pipe));
+               usb_fill_bulk_urb(urb, udev, pipe,
+                               pending_req->buffer, req->buffer_length,
+                               usbback_urb_complete, pending_req);
+               urb->transfer_flags = req->transfer_flags;
+               break;
+
+       default:
+               break;
+       }
+
+       if (req->buffer_length) {
+               urb->transfer_dma = pending_req->transfer_dma;
+               urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+       }
+}
+
+static int usbback_set_interface_work(struct usb_device *udev,
+                                      struct work_request_data *data)
+{
+       int ret;
+
+       usb_lock_device(udev);
+       ret = usb_set_interface(udev, data->set_interface.interface,
+                               data->set_interface.alternate);
+       usb_unlock_device(udev);
+
+       return ret;
+}
+
+static int usbback_clear_halt_work(struct usb_device *udev,
+                                  struct work_request_data *data)
+{
+       int ret;
+
+       usb_lock_device(udev);
+       ret = usb_clear_halt(udev, data->clear_halt.pipe);
+       usb_unlock_device(udev);
+
+       return ret;
+}
+
+static void usbback_work(struct work_struct *arg)
+{
+       struct work_request *req;
+       struct pending_req *pending_req;
+       struct usb_device *udev;
+       int ret;
+
+       req = container_of(arg, struct work_request, work);
+       pending_req = req->pending_req;
+       udev = pending_req->stub->udev;
+
+       ret = req->data.work_func(udev, &req->data);
+       usb_put_dev(udev);
+
+       usbback_do_response_ret(pending_req, ret);
+       kfree(req);
+}
+
+static void usbback_sched_work(struct pending_req *pending_req,
+                              struct work_request_data *data)
+{
+       struct work_request *req;
+       struct usb_device *udev = pending_req->stub->udev;
+
+       req = kmalloc(sizeof(*req), GFP_KERNEL);
+       if (!req) {
+               usbback_do_response_ret(pending_req, -ESHUTDOWN);
+               return;
+       }
+
+       req->pending_req = pending_req;
+       req->data = *data;
+       INIT_WORK(&req->work, usbback_work);
+
+       usb_get_dev(udev);
+       schedule_work(&req->work);
+}
+
+static void usbback_set_interface(struct pending_req *pending_req,
+                                 int interface, int alternate)
+{
+       struct work_request_data data;
+
+       data.work_func = usbback_set_interface_work;
+       data.set_interface.interface = interface;
+       data.set_interface.alternate = alternate;
+       usbback_sched_work(pending_req, &data);
+}
+
+static void usbback_clear_halt(struct pending_req *pending_req, int pipe)
+{
+       struct work_request_data data;
+
+       data.work_func = usbback_clear_halt_work;
+       data.clear_halt.pipe = pipe;
+       usbback_sched_work(pending_req, &data);
+}
+
+static void usbback_set_address(struct usbback_info *usbif,
+                               struct usbstub *stub, int cur_addr,
+                               int new_addr)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&usbif->addr_lock, flags);
+       if (cur_addr)
+               usbif->addr_table[cur_addr] = NULL;
+       if (new_addr)
+               usbif->addr_table[new_addr] = stub;
+       stub->addr = new_addr;
+       spin_unlock_irqrestore(&usbif->addr_lock, flags);
+}
+
+static struct usbstub *usbback_find_attached_device(struct usbback_info *usbif,
+                                                   int portnum)
+{
+       struct usbstub *stub;
+       int found = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&usbif->stub_lock, flags);
+       list_for_each_entry(stub, &usbif->stub_list, dev_list)
+               if (stub->portid->portnum == portnum) {
+                       found = 1;
+                       break;
+               }
+       spin_unlock_irqrestore(&usbif->stub_lock, flags);
+
+       return found ? stub : NULL;
+}
+
+static void usbback_process_unlink_req(struct usbif_urb_request *req,
+                                      struct pending_req *pending_req)
+{
+       struct usbback_info *usbif = pending_req->usbif;
+       struct pending_req *unlink_req;
+       int devnum;
+       int ret = 0;
+       unsigned long flags;
+
+       devnum = usb_pipedevice(req->pipe);
+       if (unlikely(devnum == 0)) {
+               pending_req->stub = usbback_find_attached_device(usbif,
+                                               usbif_pipeportnum(req->pipe));
+               if (unlikely(!pending_req->stub)) {
+                       ret = -ENODEV;
+                       goto fail_response;
+               }
+       } else {
+               if (unlikely(!usbif->addr_table[devnum])) {
+                       ret = -ENODEV;
+                       goto fail_response;
+               }
+               pending_req->stub = usbif->addr_table[devnum];
+       }
+
+       spin_lock_irqsave(&pending_req->stub->submitting_lock, flags);
+       list_for_each_entry(unlink_req, &pending_req->stub->submitting_list,
+                           urb_list)
+               if (unlink_req->id == req->u.unlink.unlink_id) {
+                       ret = usb_unlink_urb(unlink_req->urb);
+                       break;
+               }
+       spin_unlock_irqrestore(&pending_req->stub->submitting_lock, flags);
+
+fail_response:
+       usbback_do_response_ret(pending_req, ret);
+}
+
+static int usbback_check_and_submit(struct usbif_urb_request *req,
+                                   struct pending_req *pending_req)
+{
+       struct usbback_info *usbif = pending_req->usbif;
+       int devnum;
+       struct usbstub *stub = NULL;
+       struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *)req->u.ctrl;
+       int ret;
+       int done = 0;
+       __u16 wValue = le16_to_cpu(ctrl->wValue);
+       __u16 wIndex = le16_to_cpu(ctrl->wIndex);
+
+       devnum = usb_pipedevice(req->pipe);
+
+       /*
+        * When the device is first connected or reset, it has no USB
+        * address. In this initial state, the following requests are sent
+        * to device address 0:
+        *
+        *  1. GET_DESCRIPTOR (with descriptor type "DEVICE") is sent,
+        *     so the OS knows which device is connected.
+        *
+        *  2. SET_ADDRESS is sent, after which the device has its address.
+        *
+        * In the next step, SET_CONFIGURATION is sent to the addressed
+        * device, and the device is finally ready to use.
+        */
+       if (unlikely(devnum == 0)) {
+               stub = usbback_find_attached_device(usbif,
+                                               usbif_pipeportnum(req->pipe));
+               if (unlikely(!stub)) {
+                       ret = -ENODEV;
+                       goto do_response;
+               }
+
+               switch (ctrl->bRequest) {
+               case USB_REQ_GET_DESCRIPTOR:
+                       /*
+                        * GET_DESCRIPTOR request to device #0.
+                        * Pass it through as a normal urb transfer.
+                        */
+                       pending_req->stub = stub;
+                       return 0;
+               case USB_REQ_SET_ADDRESS:
+                       /*
+                        * SET_ADDRESS request to device #0.
+                        * Add the attached device to addr_table.
+                        */
+                       usbback_set_address(usbif, stub, 0, wValue);
+                       ret = 0;
+                       break;
+               default:
+                       ret = -EINVAL;
+                       break;
+               }
+               goto do_response;
+       }
+
+       if (unlikely(!usbif->addr_table[devnum])) {
+               ret = -ENODEV;
+               goto do_response;
+       }
+       pending_req->stub = usbif->addr_table[devnum];
+
+       /*
+        * Check special request
+        */
+       switch (ctrl->bRequest) {
+       case USB_REQ_SET_ADDRESS:
+               /*
+                * SET_ADDRESS request to an already addressed device.
+                * Change the address or remove it from addr_table.
+                */
+               usbback_set_address(usbif, pending_req->stub, devnum, wValue);
+               ret = 0;
+               goto do_response;
+       case USB_REQ_SET_INTERFACE:
+               if (ctrl->bRequestType == USB_RECIP_INTERFACE) {
+                       usbback_set_interface(pending_req, wIndex, wValue);
+                       done = 1;
+               }
+               break;
+       case USB_REQ_CLEAR_FEATURE:
+               if (ctrl->bRequestType == USB_RECIP_ENDPOINT &&
+                   wValue == USB_ENDPOINT_HALT) {
+                       int pipe;
+                       int ep = wIndex & 0x0f;
+
+                       pipe = wIndex & USB_DIR_IN ?
+                               usb_rcvctrlpipe(pending_req->stub->udev, ep) :
+                               usb_sndctrlpipe(pending_req->stub->udev, ep);
+                       usbback_clear_halt(pending_req, pipe);
+                       done = 1;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return done;
+
+do_response:
+       usbback_do_response_ret(pending_req, ret);
+       return 1;
+}
+
+static void usbback_dispatch(struct usbif_urb_request *req,
+                            struct pending_req *pending_req)
+{
+       int ret;
+       struct usbback_info *usbif = pending_req->usbif;
+
+       usbback_get(usbif);
+
+       /* unlink request */
+       if (unlikely(usbif_pipeunlink(req->pipe))) {
+               usbback_process_unlink_req(req, pending_req);
+               return;
+       }
+
+       if (usb_pipecontrol(req->pipe)) {
+               if (usbback_check_and_submit(req, pending_req))
+                       return;
+       } else {
+               int devnum = usb_pipedevice(req->pipe);
+
+               if (unlikely(!usbif->addr_table[devnum])) {
+                       ret = -ENODEV;
+                       goto fail_response;
+               }
+               pending_req->stub = usbif->addr_table[devnum];
+       }
+
+       ret = usbback_alloc_urb(req, pending_req);
+       if (ret) {
+               ret = -ESHUTDOWN;
+               goto fail_response;
+       }
+
+       usbback_add_req_to_submitting_list(pending_req);
+
+       usbback_init_urb(req, pending_req);
+
+       pending_req->nr_buffer_segs = req->nr_buffer_segs;
+       if (usb_pipeisoc(req->pipe))
+               pending_req->nr_extra_segs = req->u.isoc.nr_frame_desc_segs;
+       else
+               pending_req->nr_extra_segs = 0;
+
+       ret = usbback_gnttab_map(usbif, req, pending_req);
+       if (ret) {
+               pr_err("xen-pvusb: invalid buffer\n");
+               ret = -ESHUTDOWN;
+               goto fail_free_urb;
+       }
+
+       if (usb_pipeout(req->pipe) && req->buffer_length)
+               usbback_copy_pages_to_buff(pending_req->buffer, pending_req,
+                                          0, pending_req->nr_buffer_segs);
+       if (usb_pipeisoc(req->pipe))
+               usbback_copy_pages_to_buff(&pending_req->urb->iso_frame_desc[0],
+                                          pending_req,
+                                          pending_req->nr_buffer_segs,
+                                          pending_req->nr_extra_segs);
+
+       ret = usb_submit_urb(pending_req->urb, GFP_KERNEL);
+       if (!ret)
+               return;
+
+       pr_err("xen-pvusb: failed submitting urb, error %d\n", ret);
+       ret = -ESHUTDOWN;
+
+       usbback_fast_flush_area(pending_req);
+fail_free_urb:
+       usbback_rm_req_from_submitting_list(pending_req);
+       usbback_free_urb(pending_req->urb);
+fail_response:
+       usbback_do_response_ret(pending_req, ret);
+}
+
+static int usbback_start_submit_urb(struct usbback_info *usbif)
+{
+       struct usbif_urb_back_ring *urb_ring = &usbif->urb_ring;
+       struct usbif_urb_request req;
+       struct pending_req *pending_req;
+       RING_IDX rc, rp;
+       int more_to_do;
+
+       rc = urb_ring->req_cons;
+       rp = urb_ring->sring->req_prod;
+       rmb();  /* Ensure we see queued requests up to 'rp'. */
+
+       if (RING_REQUEST_PROD_OVERFLOW(urb_ring, rp)) {
+               rc = urb_ring->rsp_prod_pvt;
+               pr_warn("xen-pvusb: Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing on dev=%#x\n",
+                       usbif->domid, rp, rc, rp - rc, usbif->handle);
+               usbif->ring_error = 1;
+               return 0;
+       }
+
+       while (rc != rp) {
+               if (RING_REQUEST_CONS_OVERFLOW(urb_ring, rc))
+                       break;
+
+               pending_req = kmem_cache_alloc(usbback_cachep, GFP_KERNEL);
+               if (!pending_req)
+                       return 1;
+
+               req = *RING_GET_REQUEST(urb_ring, rc);
+
+               pending_req->id = req.id;
+               pending_req->usbif = usbif;
+
+               usbback_dispatch(&req, pending_req);
+
+               urb_ring->req_cons = ++rc;
+
+               cond_resched();
+       }
+
+       RING_FINAL_CHECK_FOR_REQUESTS(urb_ring, more_to_do);
+
+       return !!more_to_do;
+}
+
+static void usbback_hotplug_notify(struct usbback_info *usbif,
+                                  struct vusb_port_id *portid)
+{
+       struct usbif_conn_back_ring *ring = &usbif->conn_ring;
+       struct usbif_conn_request *req;
+       struct usbif_conn_response *res;
+       unsigned long flags;
+       u16 id;
+       int notify;
+
+       spin_lock_irqsave(&usbif->conn_ring_lock, flags);
+
+       if (!usbif->is_connected) {
+               list_add(&portid->hotplug_list, &usbif->hotplug_list);
+               spin_unlock_irqrestore(&usbif->conn_ring_lock, flags);
+               return;
+       }
+
+       req = RING_GET_REQUEST(ring, ring->req_cons);
+       id = req->id;
+       ring->req_cons++;
+       ring->sring->req_event = ring->req_cons + 1;
+
+       res = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
+       res->id = id;
+       res->portnum = portid->portnum;
+       res->speed = portid->speed;
+       ring->rsp_prod_pvt++;
+       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
+
+       spin_unlock_irqrestore(&usbif->conn_ring_lock, flags);
+
+       if (notify)
+               notify_remote_via_irq(usbif->irq);
+}
+
+static void usbback_do_hotplug(struct usbback_info *usbif)
+{
+       unsigned long flags;
+       struct vusb_port_id *portid;
+
+       spin_lock_irqsave(&usbif->conn_ring_lock, flags);
+       usbif->is_connected = 1;
+       while (!list_empty(&usbif->hotplug_list)) {
+               portid = list_first_entry(&usbif->hotplug_list,
+                                         struct vusb_port_id, hotplug_list);
+               list_del(&portid->hotplug_list);
+               spin_unlock_irqrestore(&usbif->conn_ring_lock, flags);
+               usbback_hotplug_notify(usbif, portid);
+               spin_lock_irqsave(&usbif->conn_ring_lock, flags);
+       }
+       spin_unlock_irqrestore(&usbif->conn_ring_lock, flags);
+}
+
+static irqreturn_t usbback_be_int(int irq, void *dev_id)
+{
+       struct usbback_info *usbif = dev_id;
+
+       if (usbif->ring_error)
+               return IRQ_HANDLED;
+
+       while (usbback_start_submit_urb(usbif)) {
+               usbback__free_urbs();
+               cond_resched();
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Attach usbstub device to usbif.
+ */
+static void usbback_attach_device(struct usbback_info *usbif,
+                                 struct usbstub *stub)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&usbif->stub_lock, flags);
+       list_add(&stub->dev_list, &usbif->stub_list);
+       spin_unlock_irqrestore(&usbif->stub_lock, flags);
+       stub->usbif = usbif;
+}
+
+/*
+ * Detach usbstub device from usbif.
+ */
+static void usbback_detach_device_without_lock(struct usbback_info *usbif,
+                                              struct usbstub *stub)
+{
+       if (stub->addr)
+               usbback_set_address(usbif, stub, stub->addr, 0);
+       list_del(&stub->dev_list);
+       stub->usbif = NULL;
+}
+
+static void usbback_detach_device(struct usbback_info *usbif,
+                                 struct usbstub *stub)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&usbif->stub_lock, flags);
+       usbback_detach_device_without_lock(usbif, stub);
+       spin_unlock_irqrestore(&usbif->stub_lock, flags);
+}
+
+static struct vusb_port_id *usbback_find_portid_by_busid(const char *busid)
+{
+       struct vusb_port_id *portid;
+       int found = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&usbback_port_list_lock, flags);
+       list_for_each_entry(portid, &usbback_port_list, id_list)
+               if (!(strncmp(portid->phys_bus, busid, USBBACK_BUS_ID_SIZE))) {
+                       found = 1;
+                       break;
+               }
+       spin_unlock_irqrestore(&usbback_port_list_lock, flags);
+
+       return found ? portid : NULL;
+}
+
+static struct vusb_port_id *usbback_find_portid(const domid_t domid,
+                                               unsigned handle, int portnum)
+{
+       struct vusb_port_id *portid;
+       int found = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&usbback_port_list_lock, flags);
+       list_for_each_entry(portid, &usbback_port_list, id_list)
+               if (portid->domid == domid && portid->handle == handle &&
+                   portid->portnum == portnum) {
+                       found = 1;
+                       break;
+               }
+       spin_unlock_irqrestore(&usbback_port_list_lock, flags);
+
+       return found ? portid : NULL;
+}
+
+static int usbback_portid_add(const char *busid, domid_t domid, unsigned handle,
+                             int portnum)
+{
+       struct vusb_port_id *portid;
+       unsigned long flags;
+
+       portid = kzalloc(sizeof(*portid), GFP_KERNEL);
+       if (!portid)
+               return -ENOMEM;
+
+       portid->domid = domid;
+       portid->handle = handle;
+       portid->portnum = portnum;
+
+       strlcpy(portid->phys_bus, busid, USBBACK_BUS_ID_SIZE);
+
+       spin_lock_irqsave(&usbback_port_list_lock, flags);
+       list_add(&portid->id_list, &usbback_port_list);
+       spin_unlock_irqrestore(&usbback_port_list_lock, flags);
+
+       return 0;
+}
+
+static int usbback_portid_remove(domid_t domid, unsigned handle, int portnum)
+{
+       struct vusb_port_id *portid, *tmp;
+       int err = -ENOENT;
+       unsigned long flags;
+
+       spin_lock_irqsave(&usbback_port_list_lock, flags);
+       list_for_each_entry_safe(portid, tmp, &usbback_port_list, id_list)
+               if (portid->domid == domid && portid->handle == handle &&
+                   portid->portnum == portnum) {
+                       list_del(&portid->id_list);
+                       kfree(portid);
+                       err = 0;
+               }
+       spin_unlock_irqrestore(&usbback_port_list_lock, flags);
+
+       return err;
+}
+
+static struct usbstub *usbback_stub_alloc(struct usb_device *udev,
+                                         struct vusb_port_id *portid)
+{
+       struct usbstub *stub;
+
+       stub = kzalloc(sizeof(*stub), GFP_KERNEL);
+       if (!stub)
+               return NULL;
+
+       kref_init(&stub->kref);
+       stub->udev = usb_get_dev(udev);
+       stub->portid = portid;
+       spin_lock_init(&stub->submitting_lock);
+       INIT_LIST_HEAD(&stub->submitting_list);
+
+       return stub;
+}
+
+static void usbback_stub_release(struct kref *kref)
+{
+       struct usbstub *stub;
+
+       stub = container_of(kref, struct usbstub, kref);
+
+       usb_put_dev(stub->udev);
+       stub->udev = NULL;
+       stub->portid = NULL;
+       kfree(stub);
+}
+
+static struct usbback_info *usbback_find_usbif(domid_t domid, unsigned handle)
+{
+       struct usbback_info *usbif;
+       int found = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&usbback_usbif_list_lock, flags);
+       list_for_each_entry(usbif, &usbback_usbif_list, usbif_list)
+               if (usbif->domid == domid && usbif->handle == handle) {
+                       found = 1;
+                       break;
+               }
+       spin_unlock_irqrestore(&usbback_usbif_list_lock, flags);
+
+       return found ? usbif : NULL;
+}
+
+static int usbback_stub_probe(struct usb_interface *intf,
+                             const struct usb_device_id *id)
+{
+       struct usb_device *udev = interface_to_usbdev(intf);
+       const char *busid = dev_name(intf->dev.parent);
+       struct vusb_port_id *portid;
+       struct usbstub *stub;
+       struct usbback_info *usbif;
+
+       /* hub currently not supported, so skip. */
+       if (udev->descriptor.bDeviceClass == USB_CLASS_HUB)
+               return -ENODEV;
+
+       portid = usbback_find_portid_by_busid(busid);
+       if (!portid)
+               return -ENODEV;
+
+       usbif = usbback_find_usbif(portid->domid, portid->handle);
+       if (!usbif)
+               return -ENODEV;
+
+       switch (udev->speed) {
+       case USB_SPEED_LOW:
+       case USB_SPEED_FULL:
+               break;
+       case USB_SPEED_HIGH:
+               if (usbif->usb_ver >= USB_VER_USB20)
+                       break;
+               /* fall through */
+       default:
+               return -ENODEV;
+       }
+
+       stub = usbback_find_attached_device(usbif, portid->portnum);
+       if (!stub) {
+               /* new connection */
+               stub = usbback_stub_alloc(udev, portid);
+               if (!stub)
+                       return -ENOMEM;
+               usbback_attach_device(usbif, stub);
+               portid->speed = udev->speed;
+               usbback_hotplug_notify(usbif, portid);
+       } else {
+               /* may already be connected via another interface */
+               if (strncmp(stub->portid->phys_bus, busid, USBBACK_BUS_ID_SIZE))
+                       return -ENODEV;
+       }
+
+       kref_get(&stub->kref);
+       usb_set_intfdata(intf, stub);
+       return 0;
+}
+
+static void usbback_stub_disconnect(struct usb_interface *intf)
+{
+       struct usbstub *stub = (struct usbstub *)usb_get_intfdata(intf);
+
+       usb_set_intfdata(intf, NULL);
+
+       if (!stub)
+               return;
+
+       if (stub->usbif) {
+               stub->portid->speed = 0;
+               usbback_hotplug_notify(stub->usbif, stub->portid);
+               usbback_detach_device(stub->usbif, stub);
+       }
+       usbback_unlink_urbs(stub);
+       kref_put(&stub->kref, usbback_stub_release);
+}
+
+static ssize_t usbback_stub_show_portids(struct device_driver *driver,
+                                        char *buf)
+{
+       struct vusb_port_id *portid;
+       size_t count = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&usbback_port_list_lock, flags);
+       list_for_each_entry(portid, &usbback_port_list, id_list) {
+               if (count >= PAGE_SIZE)
+                       break;
+               count += scnprintf((char *)buf + count, PAGE_SIZE - count,
+                               "%s:%d:%d:%d\n", portid->phys_bus,
+                               portid->domid, portid->handle, portid->portnum);
+       }
+       spin_unlock_irqrestore(&usbback_port_list_lock, flags);
+
+       return count;
+}
+static DRIVER_ATTR(port_ids, S_IRUSR, usbback_stub_show_portids, NULL);
+
+/* table of devices that matches any USB device */
+static const struct usb_device_id usbback_stub_table[] = {
+               { .driver_info = 1 }, /* wildcard, see usb_match_id() */
+               { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, usbback_stub_table);
+
+static struct usb_driver usbback_usb_driver = {
+               .name = "usbback",
+               .probe = usbback_stub_probe,
+               .disconnect = usbback_stub_disconnect,
+               .id_table = usbback_stub_table,
+               .no_dynamic_id = 1,
+};
+
+static int __init usbback_stub_init(void)
+{
+       int err;
+
+       err = usb_register(&usbback_usb_driver);
+       if (err) {
+               pr_err("xen-pvusb: usb_register failed (%d)\n", err);
+               return err;
+       }
+
+       err = driver_create_file(&usbback_usb_driver.drvwrap.driver,
+                                &driver_attr_port_ids);
+       if (err)
+               usb_deregister(&usbback_usb_driver);
+
+       return err;
+}
+
+static void usbback_stub_exit(void)
+{
+       driver_remove_file(&usbback_usb_driver.drvwrap.driver,
+                          &driver_attr_port_ids);
+       usb_deregister(&usbback_usb_driver);
+}
+
+static int usbback_process_port(struct xenbus_transaction xbt,
+                               struct usbback_info *usbif,
+                               struct xenbus_device *dev, unsigned port)
+{
+       char node[8];
+       char *busid;
+       int err;
+       struct vusb_port_id *portid;
+
+       snprintf(node, sizeof(node), "port/%d", port);
+       busid = xenbus_read(xbt, dev->nodename, node, NULL);
+       if (IS_ERR(busid)) {
+               err = PTR_ERR(busid);
+               xenbus_dev_fatal(dev, err, "reading %s", node);
+               return err;
+       }
+
+       /* Remove the portid if the port is not connected. */
+       if (strlen(busid) == 0) {
+               portid = usbback_find_portid(usbif->domid, usbif->handle, port);
+               if (portid)
+                       usbback_portid_remove(usbif->domid,
+                                             usbif->handle, port);
+               return 0; /* never configured, ignore */
+       }
+
+       /*
+        * Add a portid if the port isn't configured yet and the busid
+        * isn't already used by another usbif.
+        */
+       portid = usbback_find_portid(usbif->domid, usbif->handle, port);
+       if (portid) {
+               if ((strncmp(portid->phys_bus, busid, USBBACK_BUS_ID_SIZE)))
+                       xenbus_dev_fatal(dev, -EEXIST,
+                                        "can't add %s, remove first", node);
+       } else {
+               if (usbback_find_portid_by_busid(busid))
+                       xenbus_dev_fatal(dev, -EBUSY,
+                               "can't add %s, busid already used", node);
+               else
+                       usbback_portid_add(busid, usbif->domid, usbif->handle,
+                                          port);
+       }
+
+       return 0;
+}
+
+static void usbback_backend_changed(struct xenbus_watch *watch,
+                                   const char **vec, unsigned len)
+{
+       struct xenbus_transaction xbt;
+       int err;
+       unsigned i;
+       struct usbback_info *usbif;
+       struct xenbus_device *dev;
+
+       usbif = container_of(watch, struct usbback_info, backend_watch);
+       dev = usbif->xbdev;
+
+       do {
+               err = xenbus_transaction_start(&xbt);
+               if (err) {
+                       xenbus_dev_fatal(dev, err, "starting transaction");
+                       return;
+               }
+
+               for (i = 1; i <= usbif->num_ports; i++) {
+                       if (usbback_process_port(xbt, usbif, dev, i)) {
+                               xenbus_transaction_end(xbt, 1);
+                               return;
+                       }
+               }
+
+               err = xenbus_transaction_end(xbt, 0);
+       } while (err == -EAGAIN);
+
+       if (err)
+               xenbus_dev_fatal(dev, err, "completing transaction");
+}
+
+static void usbback_disconnect(struct usbback_info *usbif)
+{
+       struct usbstub *stub, *tmp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&usbif->stub_lock, flags);
+       list_for_each_entry_safe(stub, tmp, &usbif->stub_list, dev_list) {
+               usbback_unlink_urbs(stub);
+               usbback_detach_device_without_lock(usbif, stub);
+       }
+       spin_unlock_irqrestore(&usbif->stub_lock, flags);
+
+       wait_event(usbif->waiting_to_free, atomic_read(&usbif->refcnt) == 0);
+
+       if (usbif->irq) {
+               unbind_from_irqhandler(usbif->irq, usbif);
+               usbif->irq = 0;
+       }
+
+       if (usbif->urb_ring.sring) {
+               xenbus_unmap_ring_vfree(usbif->xbdev, usbif->urb_ring.sring);
+               xenbus_unmap_ring_vfree(usbif->xbdev, usbif->conn_ring.sring);
+               usbif->urb_ring.sring = NULL;
+               usbif->conn_ring.sring = NULL;
+       }
+}
+
+static void usbback_free(struct usbback_info *usbif)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&usbback_usbif_list_lock, flags);
+       list_del(&usbif->usbif_list);
+       spin_unlock_irqrestore(&usbback_usbif_list_lock, flags);
+       kfree(usbif);
+}
+
+static int usbback_remove(struct xenbus_device *dev)
+{
+       struct usbback_info *usbif = dev_get_drvdata(&dev->dev);
+       int i;
+
+       if (!usbif)
+               return 0;
+
+       if (usbif->backend_watch.node) {
+               unregister_xenbus_watch(&usbif->backend_watch);
+               kfree(usbif->backend_watch.node);
+               usbif->backend_watch.node = NULL;
+       }
+
+       /* remove all ports */
+       for (i = 1; i <= usbif->num_ports; i++)
+               usbback_portid_remove(usbif->domid, usbif->handle, i);
+       usbback_disconnect(usbif);
+       usbback_free(usbif);
+
+       dev_set_drvdata(&dev->dev, NULL);
+
+       return 0;
+}
+
+static struct usbback_info *usbback_alloc(domid_t domid, unsigned handle)
+{
+       struct usbback_info *usbif;
+       unsigned long flags;
+       int i;
+
+       usbif = kzalloc(sizeof(struct usbback_info), GFP_KERNEL);
+       if (!usbif)
+               return NULL;
+
+       usbif->domid = domid;
+       usbif->handle = handle;
+       spin_lock_init(&usbif->urb_ring_lock);
+       spin_lock_init(&usbif->conn_ring_lock);
+       atomic_set(&usbif->refcnt, 0);
+       usbif->ring_error = 0;
+       init_waitqueue_head(&usbif->waiting_to_free);
+       spin_lock_init(&usbif->stub_lock);
+       INIT_LIST_HEAD(&usbif->stub_list);
+       INIT_LIST_HEAD(&usbif->hotplug_list);
+       spin_lock_init(&usbif->addr_lock);
+       for (i = 0; i < USB_DEV_ADDR_SIZE; i++)
+               usbif->addr_table[i] = NULL;
+
+       spin_lock_irqsave(&usbback_usbif_list_lock, flags);
+       list_add(&usbif->usbif_list, &usbback_usbif_list);
+       spin_unlock_irqrestore(&usbback_usbif_list_lock, flags);
+
+       return usbif;
+}
+
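+/*
+ * xenbus probe callback: read "num-ports" and "usb-ver" from the backend's
+ * xenstore node, install a watch on the port sub-nodes and switch the
+ * device to Connected.
+ */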
+static int usbback_probe(struct xenbus_device *dev,
+                         const struct xenbus_device_id *id)
+{
+       struct usbback_info *usbif;
+       unsigned long handle;
+       int num_ports;
+       int usb_ver;
+       int err;
+
+       if (usb_disabled())
+               return -ENODEV;
+
+       if (kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle))
+               return -EINVAL;
+
+       usbif = usbback_alloc(dev->otherend_id, handle);
+       if (!usbif) {
+               xenbus_dev_fatal(dev, -ENOMEM, "allocating backend interface");
+               return -ENOMEM;
+       }
+       usbif->xbdev = dev;
+       dev_set_drvdata(&dev->dev, usbif);
+
+       err = xenbus_scanf(XBT_NIL, dev->nodename, "num-ports", "%d",
+                          &num_ports);
+       if (err != 1) {
+               if (err >= 0)
+                       err = -EINVAL;
+               xenbus_dev_fatal(dev, err, "reading num-ports");
+               goto fail;
+       }
+       if (num_ports < 1 || num_ports > USB_MAXCHILDREN) {
+               err = -EINVAL;
+               xenbus_dev_fatal(dev, err, "invalid num-ports");
+               goto fail;
+       }
+       usbif->num_ports = num_ports;
+
+       err = xenbus_scanf(XBT_NIL, dev->nodename, "usb-ver", "%d", &usb_ver);
+       if (err != 1) {
+               if (err >= 0)
+                       err = -EINVAL;
+               xenbus_dev_fatal(dev, err, "reading usb-ver");
+               goto fail;
+       }
+       switch (usb_ver) {
+       case USB_VER_USB11:
+       case USB_VER_USB20:
+               usbif->usb_ver = usb_ver;
+               break;
+       default:
+               err = -EINVAL;
+               xenbus_dev_fatal(dev, err, "invalid usb-ver");
+               goto fail;
+       }
+
+       err = xenbus_watch_pathfmt(dev, &usbif->backend_watch,
+                                  usbback_backend_changed, "%s/port",
+                                  dev->nodename);
+       if (err)
+               goto fail;
+
+       err = xenbus_switch_state(dev, XenbusStateConnected);
+       if (err)
+               goto fail;
+
+       return 0;
+
+fail:
+       usbback_remove(dev);
+       return err;
+}
+
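+/*
+ * Map the URB and connection shared rings granted by the frontend and bind
+ * the interdomain event channel to a threaded irq served by usbback_be_int().
+ */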
+static int usbback_map(struct usbback_info *usbif, grant_ref_t urb_ring_ref,
+                      grant_ref_t conn_ring_ref, evtchn_port_t evtchn)
+{
+       int err;
+       void *addr;
+       struct usbif_urb_sring *urb_sring;
+       struct usbif_conn_sring *conn_sring;
+
+       if (usbif->irq)
+               return 0;
+
+       err = xenbus_map_ring_valloc(usbif->xbdev, urb_ring_ref, &addr);
+       if (err)
+               return err;
+       urb_sring = addr;
+       err = xenbus_map_ring_valloc(usbif->xbdev, conn_ring_ref, &addr);
+       if (err)
+               goto fail_alloc;
+       conn_sring = addr;
+
+       err = bind_interdomain_evtchn_to_irq(usbif->domid, evtchn);
+       if (err < 0)
+               goto fail_evtchn;
+       usbif->irq = err;
+
+       BACK_RING_INIT(&usbif->urb_ring, urb_sring, PAGE_SIZE);
+       BACK_RING_INIT(&usbif->conn_ring, conn_sring, PAGE_SIZE);
+
+       err = request_threaded_irq(usbif->irq, NULL, usbback_be_int,
+                                  IRQF_ONESHOT, "xen-usbif", usbif);
+       if (err)
+               goto free_irq;
+
+       return 0;
+
+free_irq:
+       unbind_from_irqhandler(usbif->irq, usbif);
+       usbif->irq = 0;
+       usbif->urb_ring.sring = NULL;
+       usbif->conn_ring.sring = NULL;
+fail_evtchn:
+       xenbus_unmap_ring_vfree(usbif->xbdev, conn_sring);
+fail_alloc:
+       xenbus_unmap_ring_vfree(usbif->xbdev, urb_sring);
+
+       return err;
+}
+
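+/*
+ * Read the ring references and the event channel published by the frontend
+ * and connect to them.
+ */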
+static int usbback_connect_rings(struct usbback_info *usbif)
+{
+       struct xenbus_device *dev = usbif->xbdev;
+       unsigned urb_ring_ref, conn_ring_ref, evtchn;
+       int err;
+
+       err = xenbus_gather(XBT_NIL, dev->otherend,
+                           "urb-ring-ref", "%u", &urb_ring_ref,
+                           "conn-ring-ref", "%u", &conn_ring_ref,
+                           "event-channel", "%u", &evtchn, NULL);
+       if (err) {
+               xenbus_dev_fatal(dev, err,
+                                "reading %s/urb-ring-ref, conn-ring-ref and event-channel",
+                                dev->otherend);
+               return err;
+       }
+
+       pr_info("xen-pvusb: urb-ring-ref %u, conn-ring-ref %u, event-channel 
%u\n",
+               urb_ring_ref, conn_ring_ref, evtchn);
+
+       err = usbback_map(usbif, urb_ring_ref, conn_ring_ref, evtchn);
+       if (err)
+               xenbus_dev_fatal(dev, err,
+                       "mapping urb-ring-ref %u conn-ring-ref %u port %u",
+                       urb_ring_ref, conn_ring_ref, evtchn);
+
+       return err;
+}
+
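+/* otherend_changed callback: react to state changes of the frontend. */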
+static void usbback_frontend_changed(struct xenbus_device *dev,
+                                    enum xenbus_state frontend_state)
+{
+       struct usbback_info *usbif = dev_get_drvdata(&dev->dev);
+
+       switch (frontend_state) {
+       case XenbusStateInitialised:
+       case XenbusStateReconfiguring:
+       case XenbusStateReconfigured:
+               break;
+
+       case XenbusStateInitialising:
+               if (dev->state == XenbusStateClosed) {
+                       pr_info("xen-pvusb: %s: prepare for reconnect\n",
+                               dev->nodename);
+                       xenbus_switch_state(dev, XenbusStateInitWait);
+               }
+               break;
+
+       case XenbusStateConnected:
+               if (dev->state != XenbusStateConnected)
+                       xenbus_switch_state(dev, XenbusStateConnected);
+
+               if (usbback_connect_rings(usbif))
+                       break;
+
+               usbback_do_hotplug(usbif);
+               break;
+
+       case XenbusStateClosing:
+               usbback_disconnect(usbif);
+               xenbus_switch_state(dev, XenbusStateClosing);
+               break;
+
+       case XenbusStateClosed:
+               xenbus_switch_state(dev, XenbusStateClosed);
+               if (xenbus_dev_is_online(dev))
+                       break;
+               /* fall through if not online */
+       case XenbusStateUnknown:
+               device_unregister(&dev->dev);
+               break;
+
+       default:
+               xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
+                                frontend_state);
+               break;
+       }
+}
+
+static const struct xenbus_device_id usbback_ids[] = {
+       { "vusb" },
+       { "" },
+};
+
+static struct xenbus_driver usbback_driver = {
+       .ids                    = usbback_ids,
+       .probe                  = usbback_probe,
+       .otherend_changed       = usbback_frontend_changed,
+       .remove                 = usbback_remove,
+};
+
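+/*
+ * Module init: create the cache for pending requests, register the xenbus
+ * backend driver and set up the device stub handling (usbback_stub_init()).
+ */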
+static int __init usbback_init(void)
+{
+       int err;
+
+       if (!xen_domain())
+               return -ENODEV;
+
+       usbback_cachep = kmem_cache_create("xen-usbif_cache",
+               sizeof(struct pending_req), 0, 0, NULL);
+       if (!usbback_cachep)
+               return -ENOMEM;
+
+       err = xenbus_register_backend(&usbback_driver);
+       if (err)
+               goto out_mem;
+
+       err = usbback_stub_init();
+       if (err)
+               goto out_xenbus;
+
+       return 0;
+
+out_xenbus:
+       xenbus_unregister_driver(&usbback_driver);
+out_mem:
+       kmem_cache_destroy(usbback_cachep);
+       return err;
+}
+module_init(usbback_init);
+
+static void __exit usbback_exit(void)
+{
+       xenbus_unregister_driver(&usbback_driver);
+       usbback_stub_exit();
+       kmem_cache_destroy(usbback_cachep);
+}
+module_exit(usbback_exit);
+
+MODULE_ALIAS("xen-backend:vusb");
+MODULE_AUTHOR("Juergen Gross <jgross@xxxxxxxx>");
+MODULE_DESCRIPTION("Xen USB backend driver (usbback)");
+MODULE_LICENSE("Dual BSD/GPL");
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel