[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v3 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront



Ping?

Dongli Zhang

On 12/19/2018 09:23 PM, Dongli Zhang wrote:
> The xenstore 'ring-page-order' is used globally for each blkback queue and
> therefore should be read from xenstore only once. However, it is obtained
> in read_per_ring_refs() which might be called multiple times during the
> initialization of each blkback queue.
> 
> If the blkfront is malicious and sets 'ring-page-order' to a different
> value every time before blkback reads it, this may end up at
> the "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" in
> xen_blkif_disconnect() when the frontend is destroyed.
> 
> This patch reworks connect_ring() to read xenstore 'ring-page-order' only
> once.
> 
> Signed-off-by: Dongli Zhang <dongli.zhang@xxxxxxxxxx>
> ---
> Changed since v1:
>   * change the order of xenstore read in read_per_ring_refs
>   * use xenbus_read_unsigned() in connect_ring()
> 
> Changed since v2:
>   * simplify the condition check as "(err != 1 && nr_grefs > 1)"
>   * avoid setting err as -EINVAL to remove extra one line of code
> 
>  drivers/block/xen-blkback/xenbus.c | 74 
> +++++++++++++++++++++-----------------
>  1 file changed, 41 insertions(+), 33 deletions(-)
> 
> diff --git a/drivers/block/xen-blkback/xenbus.c 
> b/drivers/block/xen-blkback/xenbus.c
> index a4bc74e..dfea3a4 100644
> --- a/drivers/block/xen-blkback/xenbus.c
> +++ b/drivers/block/xen-blkback/xenbus.c
> @@ -926,7 +926,7 @@ static int read_per_ring_refs(struct xen_blkif_ring 
> *ring, const char *dir)
>       int err, i, j;
>       struct xen_blkif *blkif = ring->blkif;
>       struct xenbus_device *dev = blkif->be->dev;
> -     unsigned int ring_page_order, nr_grefs, evtchn;
> +     unsigned int nr_grefs, evtchn;
>  
>       err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
>                         &evtchn);
> @@ -936,43 +936,36 @@ static int read_per_ring_refs(struct xen_blkif_ring 
> *ring, const char *dir)
>               return err;
>       }
>  
> -     err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
> -                       &ring_page_order);
> +     nr_grefs = blkif->nr_ring_pages;
> +     WARN_ON(!nr_grefs);
> +
> +     for (i = 0; i < nr_grefs; i++) {
> +             char ring_ref_name[RINGREF_NAME_LEN];
> +
> +             snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
> +             err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
> +                                "%u", &ring_ref[i]);
> +
> +             if (err != 1 && nr_grefs > 1) {
> +                     xenbus_dev_fatal(dev, err, "reading %s/%s",
> +                                      dir, ring_ref_name);
> +                     return -EINVAL;
> +             }
> +
> +             if (err != 1)
> +                     break;
> +     }
> +
>       if (err != 1) {
> -             err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", 
> &ring_ref[0]);
> +             WARN_ON(nr_grefs != 1);
> +
> +             err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
> +                                &ring_ref[0]);
>               if (err != 1) {
> -                     err = -EINVAL;
>                       xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
> -                     return err;
> -             }
> -             nr_grefs = 1;
> -     } else {
> -             unsigned int i;
> -
> -             if (ring_page_order > xen_blkif_max_ring_order) {
> -                     err = -EINVAL;
> -                     xenbus_dev_fatal(dev, err, "%s/request %d ring page 
> order exceed max:%d",
> -                                      dir, ring_page_order,
> -                                      xen_blkif_max_ring_order);
> -                     return err;
> -             }
> -
> -             nr_grefs = 1 << ring_page_order;
> -             for (i = 0; i < nr_grefs; i++) {
> -                     char ring_ref_name[RINGREF_NAME_LEN];
> -
> -                     snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", 
> i);
> -                     err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
> -                                        "%u", &ring_ref[i]);
> -                     if (err != 1) {
> -                             err = -EINVAL;
> -                             xenbus_dev_fatal(dev, err, "reading %s/%s",
> -                                              dir, ring_ref_name);
> -                             return err;
> -                     }
> +                     return -EINVAL;
>               }
>       }
> -     blkif->nr_ring_pages = nr_grefs;
>  
>       for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
>               req = kzalloc(sizeof(*req), GFP_KERNEL);
> @@ -1030,6 +1023,7 @@ static int connect_ring(struct backend_info *be)
>       size_t xspathsize;
>       const size_t xenstore_path_ext_size = 11; /* sufficient for 
> "/queue-NNN" */
>       unsigned int requested_num_queues = 0;
> +     unsigned int ring_page_order;
>  
>       pr_debug("%s %s\n", __func__, dev->otherend);
>  
> @@ -1075,6 +1069,20 @@ static int connect_ring(struct backend_info *be)
>                be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
>                pers_grants ? "persistent grants" : "");
>  
> +     ring_page_order = xenbus_read_unsigned(dev->otherend,
> +                                            "ring-page-order", 0);
> +
> +     if (ring_page_order > xen_blkif_max_ring_order) {
> +             err = -EINVAL;
> +             xenbus_dev_fatal(dev, err,
> +                              "requested ring page order %d exceed max:%d",
> +                              ring_page_order,
> +                              xen_blkif_max_ring_order);
> +             return err;
> +     }
> +
> +     be->blkif->nr_ring_pages = 1 << ring_page_order;
> +
>       if (be->blkif->nr_rings == 1)
>               return read_per_ring_refs(&be->blkif->rings[0], dev->otherend);
>       else {
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.