
Re: [Xen-devel] [RFC PATCH 2/23] DMOP: Introduce new DMOP commands for vIOMMU support



On Fri, Mar 17, 2017 at 07:27:02PM +0800, Lan Tianyu wrote:
> This patch is to introduce create, destroy and query capabilities
> command for vIOMMU. vIOMMU layer will deal with requests and call
> arch vIOMMU ops.
> 
> Signed-off-by: Lan Tianyu <tianyu.lan@xxxxxxxxx>
> ---
>  xen/arch/x86/hvm/dm.c          | 29 +++++++++++++++++++++++++++++
>  xen/include/public/hvm/dm_op.h | 39 +++++++++++++++++++++++++++++++++++++++
>  2 files changed, 68 insertions(+)
> 
> diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
> index 2122c45..2b28f70 100644
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -491,6 +491,35 @@ static int dm_op(domid_t domid,
>          break;
>      }
>  
> +    case XEN_DMOP_create_viommu:
> +    {
> +        struct xen_dm_op_create_viommu *data =
> +            &op.u.create_viommu;
> +
> +        rc = viommu_create(d, data->base_address, data->length, 
> data->capabilities);
> +        if (rc >= 0) {

The style guide is to have a space here and the '{' on a new line.
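I.e. something like this (untested, just illustrating the expected style):

    rc = viommu_create(d, data->base_address, data->length,
                       data->capabilities);
    if ( rc >= 0 )
    {
        data->viommu_id = rc;
        rc = 0;
    }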

> +            data->viommu_id = rc;
> +            rc = 0;
> +        }
> +        break;
> +    }

Newline here..


> +    case XEN_DMOP_destroy_viommu:
> +    {
> +        const struct xen_dm_op_destroy_viommu *data =
> +            &op.u.destroy_viommu;
> +
> +        rc = viommu_destroy(d, data->viommu_id);
> +        break;
> +    }

Ahem? Missing blank line here as well.
> +    case XEN_DMOP_query_viommu_caps:
> +    {
> +        struct xen_dm_op_query_viommu_caps *data =
> +            &op.u.query_viommu_caps;
> +
> +        data->caps = viommu_query_caps(d);
> +        rc = 0;
> +        break;
> +    }

And here.
>      default:
>          rc = -EOPNOTSUPP;
>          break;
> diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
> index f54cece..b8c7359 100644
> --- a/xen/include/public/hvm/dm_op.h
> +++ b/xen/include/public/hvm/dm_op.h
> @@ -318,6 +318,42 @@ struct xen_dm_op_inject_msi {
>      uint64_aligned_t addr;
>  };
>  
> +/*
> + * XEN_DMOP_create_viommu: Create vIOMMU device.
> + */
> +#define XEN_DMOP_create_viommu 15
> +
> +struct xen_dm_op_create_viommu {
> +    /* IN - MMIO base address of vIOMMU */

Any limit? Can it be zero?

> +    uint64_t base_address;
> +    /* IN - Length of MMIO region */

Any restrictions? Can it be, say, 2 bytes? Or is this at page-size granularity?
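If it is meant to be page-granularity, it may be worth rejecting anything else in the handler, e.g. something along these lines (a hypothetical sketch only; assumes a zero base or zero length is invalid):

    /* Hypothetical sanity check -- sketch, not part of the patch. */
    if ( !data->base_address || !data->length ||
         (data->base_address & (PAGE_SIZE - 1)) ||
         (data->length & (PAGE_SIZE - 1)) )
    {
        rc = -EINVAL;
        break;
    }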

> +    uint64_t length;
> +    /* IN - Capabilities with which we want to create */
> +    uint64_t capabilities;

That sounds like some form of flags?
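If so, it would help to define the individual capability bits in this header so the device model knows what it can ask for, e.g. (bit names entirely made up here):

    /* Hypothetical capability bits -- names are placeholders. */
    #define XEN_VIOMMU_CAP_IRQ_REMAPPING   (1ULL << 0)
    #define XEN_VIOMMU_CAP_DMA_TRANSLATION (1ULL << 1)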

> +    /* OUT - vIOMMU identity */
> +    uint32_t viommu_id;
> +};
> +
> +/*
> + * XEN_DMOP_destroy_viommu: Destroy vIOMMU device.
> + */
> +#define XEN_DMOP_destroy_viommu 16
> +
> +struct xen_dm_op_destroy_viommu {
> +    /* OUT - vIOMMU identity */

Out? Not in?

> +    uint32_t viommu_id;
> +};
> +
> +/*
> + * XEN_DMOP_q_viommu: Query vIOMMU capabilities.
> + */
> +#define XEN_DMOP_query_viommu_caps 17
> +
> +struct xen_dm_op_query_viommu_caps {
> +    /* OUT - vIOMMU Capabilities*/

Don't you need to also mention which vIOMMU? You could potentially have
many of them.
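Something like this, perhaps (hypothetical layout; the explicit pad keeps the uint64_t 8-byte aligned):

    struct xen_dm_op_query_viommu_caps {
        /* IN - vIOMMU identity to query */
        uint32_t viommu_id;
        uint32_t pad;
        /* OUT - vIOMMU capabilities */
        uint64_t caps;
    };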

> +    uint64_t caps;
> +};
> +
>  struct xen_dm_op {
>      uint32_t op;
>      uint32_t pad;
> @@ -336,6 +372,9 @@ struct xen_dm_op {
>          struct xen_dm_op_set_mem_type set_mem_type;
>          struct xen_dm_op_inject_event inject_event;
>          struct xen_dm_op_inject_msi inject_msi;
> +        struct xen_dm_op_create_viommu create_viommu;
> +        struct xen_dm_op_destroy_viommu destroy_viommu;
> +        struct xen_dm_op_query_viommu_caps query_viommu_caps;
>      } u;
>  };
>  
> -- 
> 1.8.3.1
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> https://lists.xen.org/xen-devel
