
Re: [Xen-devel] [PATCH 2/6] xen-netfront: copy response out of shared buffer before accessing it



On 05/01/2018 12:01 AM, Marek Marczykowski-Górecki wrote:
Make a local copy of the response; otherwise the backend might modify it
while the frontend is still processing it, leading to a time-of-check /
time-of-use issue.

This is complementary to XSA-155.

Cc: stable@xxxxxxxxxxxxxxx
Signed-off-by: Marek Marczykowski-Górecki <marmarek@xxxxxxxxxxxxxxxxxxxxxx>
---
  drivers/net/xen-netfront.c | 51 +++++++++++++++++++--------------------
  1 file changed, 25 insertions(+), 26 deletions(-)
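
(For reference: RING_COPY_RESPONSE does not exist in mainline ring.h at
this point, so presumably an earlier patch in this series introduces it.
By analogy with the RING_COPY_REQUEST macro added for XSA-155, a minimal
sketch would be:

#define RING_COPY_RESPONSE(_r, _idx, _rsp) do {				\
	/* Use volatile to force a copy out of the shared ring. */	\
	*(_rsp) = *(volatile typeof(_rsp))RING_GET_RESPONSE(_r, _idx);	\
} while (0)

i.e. the whole response is snapshotted into frontend-local memory before
any of its fields are examined.)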

diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 4dd0668..dc99763 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -387,13 +387,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
                rmb(); /* Ensure we see responses up to 'rp'. */
                for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
Side comment: the original concern was raised about the counters above;
will those be addressed in a dedicated series?
-                       struct xen_netif_tx_response *txrsp;
+                       struct xen_netif_tx_response txrsp;
-                       txrsp = RING_GET_RESPONSE(&queue->tx, cons);
-                       if (txrsp->status == XEN_NETIF_RSP_NULL)
+                       RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
+                       if (txrsp.status == XEN_NETIF_RSP_NULL)
                                continue;
IMO, there is still no guarantee that you access consistent data after
this change. What if part of the response was fine when you started
copying, and then, in the middle of the copy, the backend poisons the
end of the response? This seems to merely narrow the window for working
with inconsistent data rather than removing the possibility completely.
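To make the distinction concrete: what the copy does close is the classic
double fetch, where a field is read from the shared page once for the
check and again for the use. A minimal illustration (the bounds check
here is hypothetical, not taken from the driver):

	struct xen_netif_tx_response *txrsp =
		RING_GET_RESPONSE(&queue->tx, cons);

	if (txrsp->id < NET_TX_RING_SIZE) {	/* fetch #1 from shared ring */
		skb = queue->tx_skbs[txrsp->id].skb;
						/* fetch #2: the backend may
						 * have changed id meanwhile */
	}

With RING_COPY_RESPONSE each field is fetched from the shared page exactly
once, so a later use cannot disagree with an earlier check of the same
field. What the copy does not provide is an atomic snapshot: the backend
can rewrite the tail of the slot while the head is being copied, so two
different fields of one local copy may still be mutually inconsistent;
that is the residual window described above.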
-                       id  = txrsp->id;
+                       id  = txrsp.id;
                        skb = queue->tx_skbs[id].skb;
                        if (unlikely(gnttab_query_foreign_access(
                                queue->grant_tx_ref[id]) != 0)) {
@@ -741,7 +741,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
                             RING_IDX rp)
{
-       struct xen_netif_extra_info *extra;
+       struct xen_netif_extra_info extra;
        struct device *dev = &queue->info->netdev->dev;
        RING_IDX cons = queue->rx.rsp_cons;
        int err = 0;
@@ -757,24 +757,23 @@ static int xennet_get_extras(struct netfront_queue *queue,
                        break;
                }
-               extra = (struct xen_netif_extra_info *)
-                       RING_GET_RESPONSE(&queue->rx, ++cons);
+               RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
-               if (unlikely(!extra->type ||
-                            extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+               if (unlikely(!extra.type ||
+                            extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        if (net_ratelimit())
                                dev_warn(dev, "Invalid extra type: %d\n",
-                                       extra->type);
+                                       extra.type);
                        err = -EINVAL;
                } else {
-                       memcpy(&extras[extra->type - 1], extra,
-                              sizeof(*extra));
+                       memcpy(&extras[extra.type - 1], &extra,
+                              sizeof(extra));
                }
                skb = xennet_get_rx_skb(queue, cons);
                ref = xennet_get_rx_ref(queue, cons);
                xennet_move_rx_slot(queue, skb, ref);
-       } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+       } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
        queue->rx.rsp_cons = cons;
        return err;
@@ -784,28 +783,28 @@ static int xennet_get_responses(struct netfront_queue *queue,
                                struct netfront_rx_info *rinfo, RING_IDX rp,
                                struct sk_buff_head *list)
  {
-       struct xen_netif_rx_response *rx = &rinfo->rx;
+       struct xen_netif_rx_response rx = rinfo->rx;
        struct xen_netif_extra_info *extras = rinfo->extras;
        struct device *dev = &queue->info->netdev->dev;
        RING_IDX cons = queue->rx.rsp_cons;
        struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
        grant_ref_t ref = xennet_get_rx_ref(queue, cons);
-       int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
+       int max = MAX_SKB_FRAGS + (rx.status <= RX_COPY_THRESHOLD);
        int slots = 1;
        int err = 0;
        unsigned long ret;
-       if (rx->flags & XEN_NETRXF_extra_info) {
+       if (rx.flags & XEN_NETRXF_extra_info) {
                err = xennet_get_extras(queue, extras, rp);
                cons = queue->rx.rsp_cons;
        }
        for (;;) {
-               if (unlikely(rx->status < 0 ||
-                            rx->offset + rx->status > XEN_PAGE_SIZE)) {
+               if (unlikely(rx.status < 0 ||
+                            rx.offset + rx.status > XEN_PAGE_SIZE)) {
                        if (net_ratelimit())
                                dev_warn(dev, "rx->offset: %u, size: %d\n",
-                                        rx->offset, rx->status);
+                                        rx.offset, rx.status);
                        xennet_move_rx_slot(queue, skb, ref);
                        err = -EINVAL;
                        goto next;
@@ -819,7 +818,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
                if (ref == GRANT_INVALID_REF) {
                        if (net_ratelimit())
                                dev_warn(dev, "Bad rx response id %d.\n",
-                                        rx->id);
+                                        rx.id);
                        err = -EINVAL;
                        goto next;
                }
@@ -832,7 +831,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
                __skb_queue_tail(list, skb);
next:
-               if (!(rx->flags & XEN_NETRXF_more_data))
+               if (!(rx.flags & XEN_NETRXF_more_data))
                        break;
                if (cons + slots == rp) {
@@ -842,7 +841,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
                        break;
                }
-               rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
+               RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx);
                skb = xennet_get_rx_skb(queue, cons + slots);
                ref = xennet_get_rx_ref(queue, cons + slots);
                slots++;
@@ -898,9 +897,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
        struct sk_buff *nskb;
        while ((nskb = __skb_dequeue(list))) {
-               struct xen_netif_rx_response *rx =
-                       RING_GET_RESPONSE(&queue->rx, ++cons);
+               struct xen_netif_rx_response rx;
                skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
+               RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
                if (shinfo->nr_frags == MAX_SKB_FRAGS) {
                        unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
@@ -911,7 +910,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
                skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
-                               rx->offset, rx->status, PAGE_SIZE);
+                               rx.offset, rx.status, PAGE_SIZE);
                skb_shinfo(nskb)->nr_frags = 0;
                kfree_skb(nskb);
@@ -1007,7 +1006,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)
        i = queue->rx.rsp_cons;
        work_done = 0;
        while ((i != rp) && (work_done < budget)) {
-               memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
+               RING_COPY_RESPONSE(&queue->rx, i, rx);
                memset(extras, 0, sizeof(rinfo.extras));
                err = xennet_get_responses(queue, &rinfo, rp, &tmpq);

