
Re: [Xen-devel] [PATCH v15 04/11] x86/hvm/ioreq: defer mapping gfns until they are actually requested



On Thu, Dec 14, 2017 at 05:41:37PM +0000, Paul Durrant wrote:
>A subsequent patch will introduce a new scheme to allow an emulator to
>map ioreq server pages directly from Xen rather than the guest P2M.
>
>This patch lays the groundwork for that change by deferring mapping of
>gfns until their values are requested by an emulator. To that end, the
>pad field of the xen_dm_op_get_ioreq_server_info structure is re-purposed
>to a flags field, and a new flag, XEN_DMOP_no_gfns, is defined which modifies the
>behaviour of XEN_DMOP_get_ioreq_server_info to allow the caller to avoid
>requesting the gfn values.
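
A minimal caller-side sketch of what this enables, based on the libxendevicemodel
changes quoted below: an emulator that only needs the event channel port passes
NULL for both gfn pointers, and the library then sets XEN_DMOP_no_gfns so Xen
does not map the ioreq gfns as a side effect of the call. The helper name is
illustrative, and the handle is assumed to have been opened with
xendevicemodel_open() elsewhere.

    #include <xendevicemodel.h>

    /*
     * Fetch only the buffered-ioreq event channel port of an ioreq server.
     * Passing NULL for ioreq_gfn and bufioreq_gfn makes libxendevicemodel
     * set XEN_DMOP_no_gfns (see the core.c hunk below), so the hypervisor
     * will not map the ioreq pages into the guest P2M on this call.
     */
    static int get_bufioreq_port(xendevicemodel_handle *dmod, domid_t domid,
                                 ioservid_t id, evtchn_port_t *port)
    {
        return xendevicemodel_get_ioreq_server_info(dmod, domid, id,
                                                    NULL /* ioreq_gfn */,
                                                    NULL /* bufioreq_gfn */,
                                                    port);
    }
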
>
>Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
>Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
>Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
>Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
>---
>Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
>Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
>Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
>Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
>Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
>Cc: Tim Deegan <tim@xxxxxxx>
>
>v8:
> - For safety make all of the pointers passed to
>   hvm_get_ioreq_server_info() optional.
> - Shrink bufioreq_handling down to a uint8_t.
>
>v3:
> - Updated in response to review comments from Wei and Roger.
> - Added a HANDLE_BUFIOREQ macro to make the code neater.
> - This patch no longer introduces a security vulnerability since there
>   is now an explicit limit on the number of ioreq servers that may be
>   created for any one domain.
>---
> tools/libs/devicemodel/core.c                   |  8 +++++
> tools/libs/devicemodel/include/xendevicemodel.h |  6 ++--
> xen/arch/x86/hvm/dm.c                           |  9 +++--
> xen/arch/x86/hvm/ioreq.c                        | 47 ++++++++++++++-----------
> xen/include/asm-x86/hvm/domain.h                |  2 +-
> xen/include/public/hvm/dm_op.h                  | 32 ++++++++++-------
> 6 files changed, 63 insertions(+), 41 deletions(-)
>
>diff --git a/tools/libs/devicemodel/core.c b/tools/libs/devicemodel/core.c
>index 355b7dec18..df2a8a0fe7 100644
>--- a/tools/libs/devicemodel/core.c
>+++ b/tools/libs/devicemodel/core.c
>@@ -204,6 +204,14 @@ int xendevicemodel_get_ioreq_server_info(
> 
>     data->id = id;
> 
>+    /*
>+     * If the caller is not requesting gfn values then instruct the
>+     * hypercall not to retrieve them as this may cause them to be
>+     * mapped.
>+     */
>+    if (!ioreq_gfn && !bufioreq_gfn)
>+        data->flags |= XEN_DMOP_no_gfns;
>+
>     rc = xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
>     if (rc)
>         return rc;
>diff --git a/tools/libs/devicemodel/include/xendevicemodel.h b/tools/libs/devicemodel/include/xendevicemodel.h
>index dda0bc7695..fffee3a4a0 100644
>--- a/tools/libs/devicemodel/include/xendevicemodel.h
>+++ b/tools/libs/devicemodel/include/xendevicemodel.h
>@@ -61,11 +61,11 @@ int xendevicemodel_create_ioreq_server(
>  * @parm domid the domain id to be serviced
>  * @parm id the IOREQ Server id.
>  * @parm ioreq_gfn pointer to a xen_pfn_t to receive the synchronous ioreq
>- *                  gfn
>+ *                  gfn. (May be NULL if not required)
>  * @parm bufioreq_gfn pointer to a xen_pfn_t to receive the buffered ioreq
>- *                    gfn
>+ *                    gfn. (May be NULL if not required)
>  * @parm bufioreq_port pointer to a evtchn_port_t to receive the buffered
>- *                     ioreq event channel
>+ *                     ioreq event channel. (May be NULL if not required)
>  * @return 0 on success, -1 on failure.
>  */
> int xendevicemodel_get_ioreq_server_info(
>diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
>index a787f43737..3c617bd754 100644
>--- a/xen/arch/x86/hvm/dm.c
>+++ b/xen/arch/x86/hvm/dm.c
>@@ -416,16 +416,19 @@ static int dm_op(const struct dmop_args *op_args)
>     {
>         struct xen_dm_op_get_ioreq_server_info *data =
>             &op.u.get_ioreq_server_info;
>+        const uint16_t valid_flags = XEN_DMOP_no_gfns;
> 
>         const_op = false;
> 
>         rc = -EINVAL;
>-        if ( data->pad )
>+        if ( data->flags & ~valid_flags )
>             break;
> 
>         rc = hvm_get_ioreq_server_info(d, data->id,
>-                                       &data->ioreq_gfn,
>-                                       &data->bufioreq_gfn,
>+                                       (data->flags & XEN_DMOP_no_gfns) ?
>+                                       NULL : &data->ioreq_gfn,
>+                                       (data->flags & XEN_DMOP_no_gfns) ?
>+                                       NULL : &data->bufioreq_gfn,
>                                        &data->bufioreq_port);
>         break;
>     }
>diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
>index f913ed31fa..284eefeac5 100644
>--- a/xen/arch/x86/hvm/ioreq.c
>+++ b/xen/arch/x86/hvm/ioreq.c
>@@ -350,6 +350,9 @@ static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,
>     }
> }
> 
>+#define HANDLE_BUFIOREQ(s) \
>+    ((s)->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF)
>+
> static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
>                                      struct vcpu *v)
> {
>@@ -371,7 +374,7 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
> 
>     sv->ioreq_evtchn = rc;
> 
>-    if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
>+    if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
>     {
>         struct domain *d = s->domain;
> 
>@@ -422,7 +425,7 @@ static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s,
> 
>         list_del(&sv->list_entry);
> 
>-        if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
>+        if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
>             free_xen_event_channel(v->domain, s->bufioreq_evtchn);
> 
>         free_xen_event_channel(v->domain, sv->ioreq_evtchn);
>@@ -449,7 +452,7 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
> 
>         list_del(&sv->list_entry);
> 
>-        if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
>+        if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
>             free_xen_event_channel(v->domain, s->bufioreq_evtchn);
> 
>         free_xen_event_channel(v->domain, sv->ioreq_evtchn);
>@@ -460,14 +463,13 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
>     spin_unlock(&s->lock);
> }
> 
>-static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
>-                                      bool handle_bufioreq)
>+static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s)
> {
>     int rc;
> 
>     rc = hvm_map_ioreq_gfn(s, false);
> 
>-    if ( !rc && handle_bufioreq )
>+    if ( !rc && HANDLE_BUFIOREQ(s) )
>         rc = hvm_map_ioreq_gfn(s, true);
> 
>     if ( rc )
>@@ -597,13 +599,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
>     if ( rc )
>         return rc;
> 
>-    if ( bufioreq_handling == HVM_IOREQSRV_BUFIOREQ_ATOMIC )
>-        s->bufioreq_atomic = true;
>-
>-    rc = hvm_ioreq_server_map_pages(
>-             s, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);

Why not set up the mapping here for the default ioreq server?
Otherwise, an old QEMU will never trigger the mapping.

Thanks
Chao
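
For clarity, the suggestion above seems to amount to something like the
following fragment inside hvm_ioreq_server_init(). This is illustrative only:
the is_default flag and the error label are assumed names, not taken from the
patch; after this change hvm_ioreq_server_map_pages() takes just the server
pointer.

    /*
     * Hypothetical: keep mapping the gfns eagerly when the default
     * (legacy) ioreq server is created, since an old QEMU never makes
     * the call that would trigger the new deferred mapping path.
     */
    if ( is_default )                      /* assumed parameter name */
    {
        rc = hvm_ioreq_server_map_pages(s);
        if ( rc )
            goto fail_map;                 /* illustrative label */
    }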

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

