Enhance blktap to handle bi-modal operation (a frontend running with a
different word size than the backend).

Index: sle10-sp1-2006-12-05/drivers/xen/blktap/blktap.c
===================================================================
--- sle10-sp1-2006-12-05.orig/drivers/xen/blktap/blktap.c	2006-12-07 16:43:47.000000000 +0100
+++ sle10-sp1-2006-12-05/drivers/xen/blktap/blktap.c	2006-12-07 16:45:05.000000000 +0100
@@ -154,7 +154,7 @@ module_param(debug_lvl, int, 0644);
  */
 typedef struct {
 	blkif_t       *blkif;
-	unsigned long  id;
+	uint64_t       id;
 	unsigned short mem_idx;
 	int            nr_pages;
 	atomic_t       pendcnt;
@@ -757,9 +757,9 @@ void blktap_kick_user(int idx)
 
 static int do_block_io_op(blkif_t *blkif);
 static void dispatch_rw_block_io(blkif_t *blkif,
-				 blkif_request_t *req,
+				 blkif_request_u *req,
 				 pending_req_t *pending_req);
-static void make_response(blkif_t *blkif, unsigned long id,
+static void make_response(blkif_t *blkif, uint64_t id,
 			  unsigned short op, int st);
 
 /******************************************************************
@@ -1085,18 +1085,26 @@ irqreturn_t tap_blkif_be_int(int irq, vo
 /******************************************************************
  * DOWNWARD CALLS -- These interface with the block-device layer proper.
  */
+#ifdef CONFIG_XEN_BIMODAL_BACKENDS
+#define req(op) (blkif_native(blkif) ? (req.nat.op) : (req.alt.op))
+#define preq(op) (blkif_native(blkif) ? (req->nat.op) : (req->alt.op))
+#else
+#define req(op) (req.op)
+#define preq(op) (req->op)
+#endif
+
 static int print_dbug = 1;
 static int do_block_io_op(blkif_t *blkif)
 {
 	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-	blkif_request_t req;
+	blkif_request_u req;
 	pending_req_t *pending_req;
 	RING_IDX rc, rp;
 	int more_to_do = 0;
 	tap_blkif_t *info;
 
 	rc = blk_ring->req_cons;
-	rp = blk_ring->sring->req_prod;
+	rp = blk_ring->BB_SRING(sring)->req_prod;
 	rmb(); /* Ensure we see queued requests up to 'rp'. */
 
 	/*Check blkif has corresponding UE ring*/
@@ -1142,10 +1150,10 @@ static int do_block_io_op(blkif_t *blkif
 			break;
 		}
 
-		memcpy(&req, RING_GET_REQUEST(blk_ring, rc), sizeof(req));
+		BB_RING_COPY_REQUEST(req, blk_ring, rc);
 		blk_ring->req_cons = ++rc; /* before make_response() */
 
-		switch (req.operation) {
+		switch (req(operation)) {
 		case BLKIF_OP_READ:
 			blkif->st_rd_req++;
 			dispatch_rw_block_io(blkif, &req, pending_req);
@@ -1158,8 +1166,8 @@ static int do_block_io_op(blkif_t *blkif
 
 		default:
 			WPRINTK("unknown operation [%d]\n",
-				req.operation);
-			make_response(blkif, req.id, req.operation,
+				req(operation));
+			make_response(blkif, req(id), req(operation),
 				      BLKIF_RSP_ERROR);
 			free_req(pending_req);
 			break;
@@ -1172,11 +1180,11 @@ static int do_block_io_op(blkif_t *blkif
 }
 
 static void dispatch_rw_block_io(blkif_t *blkif,
-				 blkif_request_t *req,
+				 blkif_request_u *req,
 				 pending_req_t *pending_req)
 {
 	extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
-	int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
+	int op, operation = (preq(operation) == BLKIF_OP_WRITE) ? WRITE : READ;
 	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
 	unsigned int nseg;
 	int ret, i;
@@ -1202,7 +1210,7 @@ static void dispatch_rw_block_io(blkif_t
 	}
 
 	/* Check that number of segments is sane. */
-	nseg = req->nr_segments;
+	nseg = preq(nr_segments);
 	if ( unlikely(nseg == 0) ||
 	     unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) ) {
 		WPRINTK("Bad number of segments in request (%d)\n", nseg);
@@ -1224,7 +1232,7 @@ static void dispatch_rw_block_io(blkif_t
 	}
 
 	pending_req->blkif     = blkif;
-	pending_req->id        = req->id;
+	pending_req->id        = preq(id);
 	pending_req->operation = operation;
 	pending_req->status    = BLKIF_RSP_OKAY;
 	pending_req->nr_pages  = nseg;
@@ -1238,13 +1246,13 @@ static void dispatch_rw_block_io(blkif_t
 		uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
 		kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
 
-		sector = req->sector_number + ((PAGE_SIZE / 512) * i);
+		sector = preq(sector_number) + ((PAGE_SIZE / 512) * i);
 		if( (blkif->sectors > 0) && (sector >= blkif->sectors) ) {
 			WPRINTK("BLKTAP: Sector request greater"
 				"than size\n");
 			WPRINTK("BLKTAP: %s request sector"
 				"[%llu,%llu], Total [%llu]\n",
-				(req->operation ==
+				(preq(operation) ==
 				 BLKIF_OP_WRITE ? "WRITE" : "READ"),
 				(long long unsigned) sector,
 				(long long unsigned) sector>>9,
@@ -1255,7 +1263,7 @@ static void dispatch_rw_block_io(blkif_t
 		if (operation == WRITE)
 			flags |= GNTMAP_readonly;
 		gnttab_set_map_op(&map[op], kvaddr, flags,
-				  req->seg[i].gref, blkif->domid);
+				  preq(seg)[i].gref, blkif->domid);
 		op++;
 
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
@@ -1272,7 +1280,7 @@ static void dispatch_rw_block_io(blkif_t
 			if (operation == WRITE)
 				flags |= GNTMAP_readonly;
 			gnttab_set_map_op(&map[op], ptep, flags,
-					  req->seg[i].gref, blkif->domid);
+					  preq(seg)[i].gref, blkif->domid);
 			op++;
 		}
 	}
@@ -1356,7 +1364,7 @@ static void dispatch_rw_block_io(blkif_t
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		down_write(&info->vma->vm_mm->mmap_sem);
 	/* Mark mapped pages as reserved: */
-	for (i = 0; i < req->nr_segments; i++) {
+	for (i = 0; i < preq(nr_segments); i++) {
 		unsigned long kvaddr;
 		struct page *pg;
 
@@ -1383,7 +1391,21 @@ static void dispatch_rw_block_io(blkif_t
 	/* Finally, write the request message to the user ring. */
 	target = RING_GET_REQUEST(&info->ufe_ring,
 				  info->ufe_ring.req_prod_pvt);
+#ifndef CONFIG_XEN_BIMODAL_BACKENDS
 	memcpy(target, req, sizeof(*req));
+#else
+	if (blkif_native(blkif))
+		memcpy(target, &req->nat, sizeof(req->nat));
+	else {
+		target->operation = req->alt.operation;
+		target->nr_segments = req->alt.nr_segments;
+		target->handle = req->alt.handle;
+		target->sector_number = req->alt.sector_number;
+		BUILD_BUG_ON((typeof(target->seg)*)0 != (typeof(req->alt.seg)*)0);
+		memcpy(target->seg, req->alt.seg,
+		       target->nr_segments * sizeof(*target->seg));
+	}
+#endif
 	target->id = usr_idx;
 	wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */
 	info->ufe_ring.req_prod_pvt++;
@@ -1393,10 +1415,11 @@ static void dispatch_rw_block_io(blkif_t
 	WPRINTK("Reached Fail_flush\n");
 	fast_flush_area(pending_req, pending_idx, usr_idx, blkif->dev_num);
 fail_response:
-	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+	make_response(blkif, preq(id), preq(operation), BLKIF_RSP_ERROR);
 	free_req(pending_req);
 }
 
+#undef req
 
 
 /******************************************************************
@@ -1404,10 +1427,15 @@ static void dispatch_rw_block_io(blkif_t
  */
 
 
-static void make_response(blkif_t *blkif, unsigned long id,
+static void make_response(blkif_t *blkif, uint64_t id,
 			  unsigned short op, int st)
 {
-	blkif_response_t *resp;
+	blkif_response_u resp;
+#ifdef CONFIG_XEN_BIMODAL_BACKENDS
+#define resp(op) (blkif_native(blkif) ? (resp.nat->op) : (resp.alt->op))
+#else
+#define resp(op) (resp->op)
+#endif
 	unsigned long flags;
 	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
 	int more_to_do = 0;
@@ -1415,12 +1443,12 @@ static void make_response(blkif_t *blkif
 	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
 
 	/* Place on the response ring for the relevant domain. */
-	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
-	resp->id        = id;
-	resp->operation = op;
-	resp->status    = st;
+	BB_RING_GET_RESPONSE(resp, blk_ring, blk_ring->rsp_prod_pvt);
+	resp(id = id);
+	resp(operation = op);
+	resp(status = st);
 	blk_ring->rsp_prod_pvt++;
-	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);
+	BB_RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);
 
 	if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
 		/*
@@ -1428,8 +1456,8 @@ static void make_response(blkif_t *blkif
 		 * notifications if requests are already in flight (lower
 		 * overheads and promotes batching).
 		 */
-		RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
-	} else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
+		BB_RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
+	} else if (BB_RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
 		more_to_do = 1;
 	}
 
@@ -1438,6 +1466,7 @@ static void make_response(blkif_t *blkif
 		blkif_notify_work(blkif);
 	if (notify)
 		notify_remote_via_irq(blkif->irq);
+#undef resp
 }
 
 static int __init blkif_init(void)
Index: sle10-sp1-2006-12-05/drivers/xen/blktap/common.h
===================================================================
--- sle10-sp1-2006-12-05.orig/drivers/xen/blktap/common.h	2006-12-07 16:43:47.000000000 +0100
+++ sle10-sp1-2006-12-05/drivers/xen/blktap/common.h	2006-12-07 16:45:05.000000000 +0100
@@ -39,8 +39,10 @@
 #include
 #include
 #include
+#ifdef CONFIG_XEN_BIMODAL_BACKENDS
+#define BLKIF_BIMODAL
+#endif
 #include
-#include
 #include
 #include
 
@@ -54,6 +56,9 @@ struct backend_info;
 typedef struct blkif_st {
 	/* Unique identifier for this interface. */
 	domid_t           domid;
+#ifdef CONFIG_XEN_BIMODAL_BACKENDS
+	unsigned char     native;
+#endif
 	unsigned int      handle;
 	/* Physical parameters of the comms window. */
 	unsigned int      evtchn;
@@ -87,6 +92,15 @@ typedef struct blkif_st {
 	uint64_t sectors;
 } blkif_t;
 
+#ifdef CONFIG_XEN_BIMODAL_BACKENDS
+#define blkif_native(blkif) ((blkif)->native)
+static inline int ring_native(const blkif_back_ring_t *ring) {
+	return blkif_native(container_of(ring, blkif_t, blk_ring));
+}
+#else
+#define blkif_native(blkif) 1
+#endif
+
 blkif_t *tap_alloc_blkif(domid_t domid);
 void tap_blkif_free(blkif_t *blkif);
 int tap_blkif_map(blkif_t *blkif, unsigned long shared_page,
Index: sle10-sp1-2006-12-05/drivers/xen/blktap/interface.c
===================================================================
--- sle10-sp1-2006-12-05.orig/drivers/xen/blktap/interface.c	2006-12-07 16:43:47.000000000 +0100
+++ sle10-sp1-2006-12-05/drivers/xen/blktap/interface.c	2006-12-07 16:45:05.000000000 +0100
@@ -46,6 +46,9 @@ blkif_t *tap_alloc_blkif(domid_t domid)
 
 	memset(blkif, 0, sizeof(*blkif));
 	blkif->domid = domid;
+#ifdef CONFIG_XEN_BIMODAL_BACKENDS
+	blkif->native = 1; /* XXX */
+#endif
 	spin_lock_init(&blkif->blk_ring_lock);
 	atomic_set(&blkif->refcnt, 1);
 	init_waitqueue_head(&blkif->wq);
@@ -96,7 +99,7 @@ static void unmap_frontend_page(blkif_t
 
 int tap_blkif_map(blkif_t *blkif, unsigned long shared_page,
 		  unsigned int evtchn)
 {
-	blkif_sring_t *sring;
+	blkif_sring_u sring;
 	int err;
 	struct evtchn_bind_interdomain bind_interdomain;
@@ -126,7 +129,7 @@ int tap_blkif_map(blkif_t *blkif, unsign
 
 	blkif->evtchn = bind_interdomain.local_port;
 
-	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
+	BB_SRING(sring) = (void *)blkif->blk_ring_area->addr;
 	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
 
 	blkif->irq = bind_evtchn_to_irqhandler(
@@ -141,10 +144,10 @@ void tap_blkif_unmap(blkif_t *blkif)
 		unbind_from_irqhandler(blkif->irq, blkif);
 		blkif->irq = 0;
 	}
-	if (blkif->blk_ring.sring) {
+	if (blkif->blk_ring.BB_SRING(sring)) {
 		unmap_frontend_page(blkif);
 		free_vm_area(blkif->blk_ring_area);
-		blkif->blk_ring.sring = NULL;
+		blkif->blk_ring.BB_SRING(sring) = NULL;
 	}
 }
 
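Note for reviewers (illustrative only, not part of the patch): the bimodal
types (blkif_request_u, blkif_response_u, blkif_sring_u), the BB_* ring
macros and the foreign "alt" request layout are expected to come from the
BLKIF_BIMODAL-aware blkif/ring interface headers, which this diff does not
touch. The toy userspace program below merely sketches the accessor pattern
that the req()/preq() macros rely on: a union of the native and foreign
request layouts plus a per-access selector. All struct layouts and names in
it are invented for illustration.

/* Toy sketch of the bimodal accessor pattern; the layouts are made up. */
#include <stdint.h>
#include <stdio.h>

struct req_nat { uint64_t id; uint8_t operation; };	/* "native" layout  */
struct req_alt { uint32_t id; uint8_t operation; };	/* "foreign" layout */

typedef union {
	struct req_nat nat;
	struct req_alt alt;
} request_u;

/* Same shape as the patch's req()/preq() macros: every field access
 * selects the native or the foreign view of the request at run time. */
#define REQ(r, native, field) ((native) ? (r).nat.field : (r).alt.field)

int main(void)
{
	/* Pretend a foreign (non-native) frontend queued this request. */
	request_u r = { .alt = { .id = 7, .operation = 1 } };
	int frontend_is_native = 0;	/* i.e. blkif->native == 0 */

	printf("id=%llu op=%u\n",
	       (unsigned long long)REQ(r, frontend_is_native, id),
	       (unsigned)REQ(r, frontend_is_native, operation));
	return 0;
}

In the driver itself the selector is the per-interface native flag exposed
as blkif_native() in common.h; tap_alloc_blkif() currently hard-codes it to
1 (note the /* XXX */), presumably until the frontend's word size can be
detected and the flag set accordingly.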