
[Xen-devel] [PATCH] linux-2.6.18: cleanup to blkback and blktap



Remove unused/unnecessary fields from blkback's and blktap's
pending_req_t structures (along with the stale ll_rw_block() extern
declarations), and reduce the width of those structures' nr_pages field.

Move the loop-invariant grant table flags calculation out of the
mapping loops (also in scsiback); a minimal sketch of the hoisting
pattern follows the sign-off.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
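
For illustration only, not part of the patch: the flags value depends
only on the request's operation, never on the loop index, so it can be
computed once up front. A self-contained sketch of the hoist, where all
demo_* names are stand-ins for the real GNTMAP_* flags and
gnttab_set_map_op():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_GNTMAP_host_map  (1u << 0)
#define DEMO_GNTMAP_readonly  (1u << 1)

#define DEMO_READ   0
#define DEMO_WRITE  1

static void demo_map_segments(int operation, unsigned int nseg,
                              uint32_t out_flags[])
{
        /* Hoisted: before the patch this was recomputed per iteration. */
        uint32_t flags = DEMO_GNTMAP_host_map;
        unsigned int i;

        if (operation != DEMO_READ)
                flags |= DEMO_GNTMAP_readonly;

        for (i = 0; i < nseg; i++)
                out_flags[i] = flags;   /* stands in for gnttab_set_map_op() */
}

int main(void)
{
        uint32_t f[4];

        demo_map_segments(DEMO_WRITE, 4, f);
        printf("flags=%#" PRIx32 "\n", f[0]);   /* host_map | readonly */
        return 0;
}

The same hoist applies verbatim to the blkback, blktap, and scsiback
loops changed below.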

--- a/drivers/xen/blkback/blkback.c
+++ b/drivers/xen/blkback/blkback.c
@@ -75,10 +75,9 @@ module_param(debug_lvl, int, 0644);
 typedef struct {
        blkif_t       *blkif;
        u64            id;
-       int            nr_pages;
        atomic_t       pendcnt;
+       unsigned short nr_pages;
        unsigned short operation;
-       int            status;
        struct list_head free_list;
 } pending_req_t;
 
@@ -257,22 +256,24 @@ int blkif_schedule(void *arg)
 
 static void __end_block_io_op(pending_req_t *pending_req, int error)
 {
+       int status = BLKIF_RSP_OKAY;
+
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
            (error == -EOPNOTSUPP)) {
                DPRINTK("blkback: write barrier op failed, not supported\n");
                blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
-               pending_req->status = BLKIF_RSP_EOPNOTSUPP;
+               status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                DPRINTK("Buffer not up-to-date at end of operation, "
                        "error=%d\n", error);
-               pending_req->status = BLKIF_RSP_ERROR;
+               status = BLKIF_RSP_ERROR;
        }
 
        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                fast_flush_area(pending_req);
                make_response(pending_req->blkif, pending_req->id,
-                             pending_req->operation, pending_req->status);
+                             pending_req->operation, status);
                blkif_put(pending_req->blkif);
                free_req(pending_req);
        }
@@ -389,7 +390,6 @@ static void dispatch_rw_block_io(blkif_t
                                 blkif_request_t *req,
                                 pending_req_t *pending_req)
 {
-       extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct phys_req preq;
        struct { 
@@ -397,6 +397,7 @@ static void dispatch_rw_block_io(blkif_t
        } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
        struct bio *bio = NULL;
+       uint32_t flags;
        int ret, i;
        int operation;
 
@@ -430,12 +431,13 @@ static void dispatch_rw_block_io(blkif_t
        pending_req->blkif     = blkif;
        pending_req->id        = req->id;
        pending_req->operation = req->operation;
-       pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;
 
-       for (i = 0; i < nseg; i++) {
-               uint32_t flags;
+       flags = GNTMAP_host_map;
+       if (operation != READ)
+               flags |= GNTMAP_readonly;
 
+       for (i = 0; i < nseg; i++) {
                seg[i].nsec = req->seg[i].last_sect -
                        req->seg[i].first_sect + 1;
 
@@ -444,9 +446,6 @@ static void dispatch_rw_block_io(blkif_t
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;
 
-               flags = GNTMAP_host_map;
-               if (operation != READ)
-                       flags |= GNTMAP_readonly;
                gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                  req->seg[i].gref, blkif->domid);
        }
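
For reference, the status field can become a local in
__end_block_io_op() above because only the completion that drops
pendcnt to zero sends the response. A userspace sketch of that idiom,
with C11 atomics standing in for the kernel's atomic_t /
atomic_dec_and_test(), and all demo_* names hypothetical:

#include <stdatomic.h>
#include <stdio.h>

#define DEMO_RSP_OKAY   0
#define DEMO_RSP_ERROR  (-1)

struct demo_req {
        unsigned long long id;
        atomic_int         pendcnt;     /* one count per in-flight bio */
};

/* Mirrors the patched __end_block_io_op(): status is derived locally,
 * and only the final decrement emits the response. */
static void demo_end_io(struct demo_req *req, int error)
{
        int status = error ? DEMO_RSP_ERROR : DEMO_RSP_OKAY;

        /* atomic_fetch_sub() returns the previous value, so 1 means
         * this was the last outstanding completion. */
        if (atomic_fetch_sub(&req->pendcnt, 1) == 1)
                printf("id=%llu -> status=%d\n", req->id, status);
}

int main(void)
{
        struct demo_req req = { .id = 42 };

        atomic_init(&req.pendcnt, 2);
        demo_end_io(&req, 0);   /* first bio completes, no response yet */
        demo_end_io(&req, 0);   /* last bio completes, response emitted */
        return 0;
}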
--- a/drivers/xen/blktap/blktap.c
+++ b/drivers/xen/blktap/blktap.c
@@ -136,20 +136,14 @@ module_param(debug_lvl, int, 0644);
 
 /*
  * Each outstanding request that we've passed to the lower device layers has a 
- * 'pending_req' allocated to it. Each buffer_head that completes decrements 
- * the pendcnt towards zero. When it hits zero, the specified domain has a 
- * response queued for it, with the saved 'id' passed back.
+ * 'pending_req' allocated to it.
  */
 typedef struct {
        blkif_t       *blkif;
        u64            id;
        unsigned short mem_idx;
-       int            nr_pages;
-       atomic_t       pendcnt;
-       unsigned short operation;
-       int            status;
+       unsigned short nr_pages;
        struct list_head free_list;
-       int            inuse;
 } pending_req_t;
 
 static pending_req_t *pending_reqs[MAX_PENDING_REQS];
@@ -996,10 +990,8 @@ static pending_req_t* alloc_req(void)
                list_del(&req->free_list);
        }
 
-       if (req) {
-               req->inuse = 1;
+       if (req)
                alloc_pending_reqs++;
-       }
        spin_unlock_irqrestore(&pending_free_lock, flags);
 
        return req;
@@ -1013,7 +1005,6 @@ static void free_req(pending_req_t *req)
        spin_lock_irqsave(&pending_free_lock, flags);
 
        alloc_pending_reqs--;
-       req->inuse = 0;
        if (mmap_lock && (req->mem_idx == mmap_alloc-1)) {
                mmap_inuse--;
                if (mmap_inuse == 0) mmap_req_del(mmap_alloc-1);
@@ -1415,16 +1406,15 @@ static void dispatch_rw_block_io(blkif_t
                                 blkif_request_t *req,
                                 pending_req_t *pending_req)
 {
-       extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
-       int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
        unsigned int nseg;
-       int ret, i, nr_sects = 0;
+       int ret, i, op, nr_sects = 0;
        tap_blkif_t *info;
        blkif_request_t *target;
        unsigned int mmap_idx = pending_req->mem_idx;
        unsigned int pending_idx = RTN_PEND_IDX(pending_req, mmap_idx);
        unsigned int usr_idx;
+       uint32_t flags;
        struct mm_struct *mm;
        struct vm_area_struct *vma = NULL;
 
@@ -1467,9 +1457,11 @@ static void dispatch_rw_block_io(blkif_t
 
        pending_req->blkif     = blkif;
        pending_req->id        = req->id;
-       pending_req->operation = operation;
-       pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;
+
+       flags = GNTMAP_host_map;
+       if (req->operation == BLKIF_OP_WRITE)
+               flags |= GNTMAP_readonly;
        op = 0;
        mm = info->mm;
        if (!xen_feature(XENFEAT_auto_translated_physmap))
@@ -1478,14 +1470,10 @@ static void dispatch_rw_block_io(blkif_t
                unsigned long uvaddr;
                unsigned long kvaddr;
                uint64_t ptep;
-               uint32_t flags;
 
                uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
                kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
 
-               flags = GNTMAP_host_map;
-               if (operation == WRITE)
-                       flags |= GNTMAP_readonly;
                gnttab_set_map_op(&map[op], kvaddr, flags,
                                  req->seg[i].gref, blkif->domid);
                op++;
@@ -1499,11 +1487,9 @@ static void dispatch_rw_block_io(blkif_t
                                goto fail_flush;
                        }
 
-                       flags = GNTMAP_host_map | GNTMAP_application_map
-                               | GNTMAP_contains_pte;
-                       if (operation == WRITE)
-                               flags |= GNTMAP_readonly;
-                       gnttab_set_map_op(&map[op], ptep, flags,
+                       gnttab_set_map_op(&map[op], ptep,
+                                         flags | GNTMAP_application_map
+                                               | GNTMAP_contains_pte,
                                          req->seg[i].gref, blkif->domid);
                        op++;
                }
@@ -1633,10 +1619,14 @@ static void dispatch_rw_block_io(blkif_t
        wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */
        info->ufe_ring.req_prod_pvt++;
 
-       if (operation == READ)
+       switch (req->operation) {
+       case BLKIF_OP_READ:
                blkif->st_rd_sect += nr_sects;
-       else if (operation == WRITE)
+               break;
+       case BLKIF_OP_WRITE:
                blkif->st_wr_sect += nr_sects;
+               break;
+       }
 
        return;
 
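The blktap hunk above keeps the hoist valid even though the user-space
PTE mapping needs extra bits: the extras are OR'd into the argument per
call instead of mutating the hoisted value. In miniature (demo_*
constants are stand-ins for the real GNTMAP_* flags):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_GNTMAP_host_map         (1u << 0)
#define DEMO_GNTMAP_readonly         (1u << 1)
#define DEMO_GNTMAP_application_map  (1u << 2)
#define DEMO_GNTMAP_contains_pte     (1u << 3)

int main(void)
{
        /* Loop-invariant base, computed once before the loop... */
        uint32_t flags = DEMO_GNTMAP_host_map | DEMO_GNTMAP_readonly;

        /* ...with the per-mapping extras OR'd in at the call site,
         * leaving the hoisted value untouched for later iterations. */
        uint32_t user_flags = flags | DEMO_GNTMAP_application_map
                                    | DEMO_GNTMAP_contains_pte;

        printf("kernel=%#" PRIx32 " user=%#" PRIx32 "\n",
               flags, user_flags);
        return 0;
}
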
--- a/drivers/xen/scsiback/scsiback.c
+++ b/drivers/xen/scsiback/scsiback.c
@@ -274,14 +274,14 @@ static int scsiback_gnttab_data_map(vscs
                        return -ENOMEM;
                }
 
-               for (i = 0; i < nr_segments; i++) {
-                       flags = GNTMAP_host_map;
-                       if (write)
-                               flags |= GNTMAP_readonly;
+               flags = GNTMAP_host_map;
+               if (write)
+                       flags |= GNTMAP_readonly;
+
+               for (i = 0; i < nr_segments; i++)
                        gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                                ring_req->seg[i].gref,
                                                info->domid);
-               }
 
                err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);
                BUG_ON(err);

