
Re: [Xen-devel] [PATCH V2 10/11] libxl_qmp, Introduce libxl__qmp_pci_add.



On Thu, 2011-10-20 at 18:59 +0100, Anthony PERARD wrote:
> This function inserts a PCI passthrough device into QEMU.
> 
> Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx>
> ---
>  tools/libxl/libxl_internal.h |    4 ++
>  tools/libxl/libxl_qmp.c      |  107 ++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 105 insertions(+), 0 deletions(-)
> 
> diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
> index 849b251..09f618f 100644
> --- a/tools/libxl/libxl_internal.h
> +++ b/tools/libxl/libxl_internal.h
> @@ -119,6 +119,9 @@ typedef struct {
>  } libxl__device;
>  
>  #define XC_PCI_BDF             "0x%x, 0x%x, 0x%x, 0x%x"
> +#define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))
> +#define PCI_SLOT(devfn)         (((devfn) >> 3) & 0x1f)
> +#define PCI_FUNC(devfn)         ((devfn) & 0x07)
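
The encoding here is the standard PCI one: devfn packs a 5-bit slot and a
3-bit function into a single byte. Purely for illustration (not part of the
patch, assumes <assert.h>):

    unsigned int devfn = PCI_DEVFN(0x05, 0x2); /* (0x05 << 3) | 0x2 == 0x2a */
    assert(PCI_SLOT(devfn) == 0x05);           /* bits 7..3 */
    assert(PCI_FUNC(devfn) == 0x2);            /* bits 2..0 */
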
>  #define AUTO_PHP_SLOT          0x100
>  #define SYSFS_PCI_DEV          "/sys/bus/pci/devices"
>  #define SYSFS_PCIBACK_DRIVER   "/sys/bus/pci/drivers/pciback"
>  _hidden libxl__qmp_handler *libxl__qmp_initialize(libxl_ctx *ctx,
>                                                    uint32_t domid);
>  /* ask to QEMU the serial port information and store it in xenstore. */
>  _hidden int libxl__qmp_query_serial(libxl__qmp_handler *qmp);
> +_hidden int libxl__qmp_pci_add(libxl__gc *gc, int d, libxl_device_pci *pcidev);
>  /* close and free the QMP handler */
>  _hidden void libxl__qmp_close(libxl__qmp_handler *qmp);
>  /* remove the socket file, if the file has already been removed,
> diff --git a/tools/libxl/libxl_qmp.c b/tools/libxl/libxl_qmp.c
> index 547cd53..bce7e16 100644
> --- a/tools/libxl/libxl_qmp.c
> +++ b/tools/libxl/libxl_qmp.c
> @@ -41,6 +41,7 @@
>   */
>  
>  #define QMP_RECEIVE_BUFFER_SIZE 4096
> +#define PCI_PT_QDEV_ID "pci-pt-%02x_%02x.%01x"
>  
>  typedef int (*qmp_callback_t)(libxl__qmp_handler *qmp,
>                                const libxl__json_object *tree,
> @@ -619,6 +620,112 @@ int libxl__qmp_query_serial(libxl__qmp_handler *qmp)
>      return rc;
>  }
>  
> +static int pci_add_callback(libxl__qmp_handler *qmp,
> +                            const libxl__json_object *response, void *opaque)
> +{
> +    libxl_device_pci *pcidev = opaque;
> +    const libxl__json_object *bus = NULL;
> +    libxl__gc gc = LIBXL_INIT_GC(qmp->ctx);
> +    int i, j, rc = -1;
> +    char *asked_id = libxl__sprintf(&gc, PCI_PT_QDEV_ID,
> +                                    pcidev->bus, pcidev->dev, pcidev->func);
> +
> +    for (i = 0; (bus = libxl__json_array_get(response, i)); i++) {
> +        const libxl__json_object *devices = NULL;
> +        const libxl__json_object *device = NULL;
> +        const libxl__json_object *o = NULL;
> +        const char *id = NULL;
> +
> +        devices = libxl__json_map_get("devices", bus, JSON_ARRAY);
> +
> +        for (j = 0; (device = libxl__json_array_get(devices, j)); j++) {
> +             o = libxl__json_map_get("qdev_id", device, JSON_STRING);
> +             id = libxl__json_object_get_string(o);
> +
> +             if (id && strcmp(asked_id, id) == 0) {
> +                 int dev_slot, dev_func;
> +
> +                 o = libxl__json_map_get("slot", device, JSON_INTEGER);
> +                 if (!o)
> +                     goto out;
> +                 dev_slot = libxl__json_object_get_integer(o);
> +                 o = libxl__json_map_get("function", device, JSON_INTEGER);
> +                 if (!o)
> +                     goto out;
> +                 dev_func = libxl__json_object_get_integer(o);
> +
> +                 pcidev->vdevfn = PCI_DEVFN(dev_slot, dev_func);
> +
> +                 rc = 0;
> +                 goto out;
> +             }
> +        }
> +    }
> +
> +
> +out:
> +    libxl__free_all(&gc);
> +    return rc;
> +}
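
(For anyone following along: the query-pci reply this loop walks is roughly
of the shape below -- a list of buses, each with a "devices" array whose
entries carry "slot", "function" and "qdev_id"; the exact field set depends
on the QEMU version.)

    { "return": [
        { "bus": 0,
          "devices": [
            { "slot": 2, "function": 0, "qdev_id": "pci-pt-03_10.1", ... },
            ...
          ] }
    ] }
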
> +
> +int libxl__qmp_pci_add(libxl__gc *gc, int domid, libxl_device_pci *pcidev)
> +{
> +    libxl__qmp_handler *qmp = NULL;
> +    flexarray_t *parameters = NULL;
> +    libxl_key_value_list args = NULL;
> +    char *hostaddr = NULL;
> +    int rc = 0;
> +    qmp_request_handle request = {
> +        .callback = pci_add_callback,
> +        .opaque = pcidev,
> +    };
> +
> +    qmp = libxl__qmp_initialize(libxl__gc_owner(gc), domid);
> +    if (!qmp)
> +        return -1;
> +
> +    hostaddr = libxl__sprintf(gc, "%04x:%02x:%02x.%01x", pcidev->domain,
> +                              pcidev->bus, pcidev->dev, pcidev->func);
> +    if (!hostaddr)
> +        return -1;
> +
> +    parameters = flexarray_make(6, 1);
> +    flexarray_append_pair(parameters, "driver", "xen-pci-passthrough");
> +    flexarray_append_pair(parameters, "id",
> +                          libxl__sprintf(gc, PCI_PT_QDEV_ID,
> +                                         pcidev->bus, pcidev->dev,
> +                                         pcidev->func));
> +    flexarray_append_pair(parameters, "hostaddr", hostaddr);
> +    if (pcidev->vdevfn) {
> +        flexarray_append_pair(parameters, "addr",
> +                              libxl__sprintf(gc, "%x.%x",
> +                                             PCI_SLOT(pcidev->vdevfn),
> +                                             PCI_FUNC(pcidev->vdevfn)));
> +    }
> +    args = libxl__xs_kvs_of_flexarray(gc, parameters, parameters->count);
> +    if (!args)
> +        return -1;
> +
> +    rc = qmp_synchronous_send(qmp, "device_add", &args, NULL, qmp->timeout);
> +    if (rc == 0) {
> +        rc = qmp_synchronous_send(qmp, "query-pci", NULL,
> +                                  &request, qmp->timeout);
> +        if (rc == 0) {
> +            rc = request.rc;
> +        }

Is every caller going to want this behaviour? Perhaps it belongs in
qmp_sync..._send?
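
Something along these lines, perhaps (untested sketch, assuming
qmp_synchronous_send has the qmp_request_handle pointer and its own rc in
scope at the point where the command has completed):

    /* at the end of qmp_synchronous_send(), once the command itself
     * succeeded, hand back the callback's verdict: */
    if (rc == 0 && request)
        rc = request->rc;
    return rc;

Then callers wouldn't each need to peek at request.rc themselves.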

> +    }
> +
> +    flexarray_free(parameters);
> +    libxl__qmp_close(qmp);
> +    return rc;
> +}
> +
>  int libxl__qmp_initializations(libxl_ctx *ctx, uint32_t domid)
>  {
>      libxl__qmp_handler *qmp = NULL;


