===== linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c 1.38 vs edited =====
--- 1.38/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c	2005-04-05 05:28:05 -07:00
+++ edited/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c	2005-04-17 21:14:38 -07:00
@@ -115,6 +115,7 @@
 
 static void fast_flush_area(int idx, int nr_pages)
 {
+    printk("cwc:%s:in\n", __FUNCTION__);
 #ifdef CONFIG_XEN_BLKDEV_GRANT
     gnttab_op_t aop[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     unsigned int i, invcount = 0;
@@ -151,6 +152,7 @@
     if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
         BUG();
 #endif
+    printk("cwc:%s:out\n", __FUNCTION__);
 }
 
 
@@ -374,6 +376,7 @@
 {
     int rsp = BLKIF_RSP_ERROR;
     int pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+    printk("cwc:%s:in id:%lu\n", __FUNCTION__, req->id);
 
     /* We expect one buffer only. */
     if ( unlikely(req->nr_segments != 1) )
@@ -384,6 +387,7 @@
          (blkif_last_sect(req->frame_and_sects[0]) != 7) )
         goto out;
 
+    printk("cwc:%s:mid\n", __FUNCTION__);
 #ifdef CONFIG_XEN_BLKDEV_GRANT
     {
         gnttab_op_t op;
@@ -428,12 +432,14 @@
 #endif
 #endif /* endif CONFIG_XEN_BLKDEV_GRANT */
 
+    printk("cwc:%s:vbd_probe\n", __FUNCTION__);
     rsp = vbd_probe(blkif, (vdisk_t *)MMAP_VADDR(pending_idx, 0),
                     PAGE_SIZE / sizeof(vdisk_t));
 
 out:
     fast_flush_area(pending_idx, 1);
     make_response(blkif, req->id, req->operation, rsp);
+    printk("cwc:%s:out response:%d\n", __FUNCTION__, rsp);
 }
 
 static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
@@ -464,6 +470,7 @@
 
     /* Check that number of segments is sane. */
     nseg = req->nr_segments;
+    printk("cwc:%s:in nsegs:%u\n", __FUNCTION__, nseg);
     if ( unlikely(nseg == 0) ||
          unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
     {
@@ -497,6 +504,7 @@
     if ( unlikely(HYPERVISOR_grant_table_op(
                       GNTTABOP_map_grant_ref, aop, nseg)))
         BUG();
+    printk("cwc:%s:post map\n", __FUNCTION__);
 
     for ( i = 0; i < nseg; i++ )
     {
@@ -513,6 +521,7 @@
         pending_handle(pending_idx, i) = aop[i].u.map_grant_ref.handle;
     }
 #endif
+    printk("cwc:%s:pre populate segs\n", __FUNCTION__);
 
     for ( i = 0; i < nseg; i++ )
     {
@@ -527,6 +536,8 @@
             goto bad_descriptor;
         preq.nr_sects += seg[i].nsec;
 #endif
+        printk("cwc:%s: buf:%lx populated %u sects\n",
+               __FUNCTION__, seg[i].buf, seg[i].nsec);
     }
 
     if ( vbd_translate(&preq, blkif, operation) != 0 )
@@ -646,6 +657,7 @@
 
         preq.sector_number += seg[i].nsec;
     }
 
+    printk("cwc:%s: post bio_add_page\n", __FUNCTION__);
     if ( (q = bdev_get_queue(bio->bi_bdev)) != plugged_queue )
     {
@@ -662,11 +674,13 @@
         submit_bio(operation, biolist[i]);
 #endif
 
+    printk("cwc:%s: %d bios submitted\n", __FUNCTION__, nbio);
     return;
 
 bad_descriptor:
     make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+    printk("cwc:%s: bad descriptor\n", __FUNCTION__);
 }
 
===== linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c 1.52 vs edited =====
--- 1.52/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c	2005-04-05 00:43:40 -07:00
+++ edited/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c	2005-04-17 22:34:21 -07:00
@@ -226,6 +226,7 @@
 {
     struct gendisk *gd = inode->i_bdev->bd_disk;
     struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
+    printk("cwc:%s: called\n", __FUNCTION__);
 
     /* Update of usage count is protected by per-device semaphore. */
     di->mi->usage++;
@@ -238,6 +239,7 @@
 {
     struct gendisk *gd = inode->i_bdev->bd_disk;
     struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
+    printk("cwc:%s: called\n", __FUNCTION__);
 
     /*
      * When usage drops to zero it may allow more VBD updates to occur.
@@ -255,6 +257,7 @@
                        unsigned command, unsigned long argument)
 {
     int i;
+    printk("cwc:%s: called\n", __FUNCTION__);
 
     DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
                   command, (long)argument, inode->i_rdev);
@@ -305,6 +308,7 @@
 #ifdef CONFIG_XEN_BLKDEV_GRANT
     int ref;
 #endif
+    printk("cwc:%s: in blkif_state:%u\n", __FUNCTION__, blkif_state);
 
     if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) )
         return 1;
@@ -355,6 +359,7 @@
 
     /* Keep a private copy so we can reissue requests when recovering. */
     translate_req_to_pfn(&rec_ring[id], ring_req);
+    printk("cwc:%s: out\n", __FUNCTION__);
 
     return 0;
 }
@@ -367,6 +372,7 @@
 {
     struct request *req;
     int queued;
+    printk("cwc:%s: called\n", __FUNCTION__);
 
     DPRINTK("Entered do_blkif_request\n");
 
@@ -407,12 +413,15 @@
     RING_IDX i, rp;
     unsigned long flags;
 
+    printk("cwc:%s: pre spin_lock_irqsave\n", __FUNCTION__);
     spin_lock_irqsave(&blkif_io_lock, flags);
+    printk("cwc:%s: post spin_lock_irqsave\n", __FUNCTION__);
 
     if ( unlikely(blkif_state == BLKIF_STATE_CLOSED) ||
          unlikely(recovery) )
     {
         spin_unlock_irqrestore(&blkif_io_lock, flags);
+        printk("cwc:%s: spin_unlock_irqsave\n", __FUNCTION__);
         return IRQ_HANDLED;
     }
 
@@ -426,6 +435,7 @@
         bret = RING_GET_RESPONSE(&blk_ring, i);
         id   = bret->id;
         req  = (struct request *)rec_ring[id].id;
+        printk("cwc:%s: blkif_completion call id %lu\n", __FUNCTION__, id);
 
         blkif_completion( &rec_ring[id] );
         ADD_ID_TO_FREELIST(id); /* overwrites req */
@@ -454,12 +464,14 @@
             BUG();
         }
     }
+    printk("cwc:%s: post loop\n", __FUNCTION__);
 
     blk_ring.rsp_cons = i;
 
     kick_pending_request_queues();
 
     spin_unlock_irqrestore(&blkif_io_lock, flags);
+    printk("cwc:%s: out unlock_irq_restore\n", __FUNCTION__);
 
     return IRQ_HANDLED;
 }
@@ -768,6 +780,7 @@
 #ifdef CONFIG_XEN_BLKDEV_GRANT
     int ref;
 #endif
+    printk("cwc:%s: in op:%d\n", __FUNCTION__, operation);
 
     fsect = (buffer_ma & ~PAGE_MASK) >> 9;
     lsect = fsect + nr_sectors - 1;
@@ -809,6 +822,7 @@
              (sg_dev == device) &&
              (sg_next_sect == sector_number) )
         {
+            printk("cwc:%s: building req\n", __FUNCTION__);
             req = RING_GET_REQUEST(&blk_ring,
                                    blk_ring.req_prod_pvt - 1);
             bh = (struct buffer_head *)id;
@@ -842,10 +856,12 @@
             /* Update the copy of the request in the recovery ring. */
             translate_req_to_pfn(&rec_ring[req->id], req );
+            printk("cwc:%s: access granted to %lx\n", __FUNCTION__, buffer_ma);
 
             return 0;
         }
         else if ( RING_FULL(&blk_ring) )
         {
+            printk("cwc:%s: ring full\n", __FUNCTION__);
             return 1;
         }
         else
@@ -857,8 +873,12 @@
         break;
 
     default:
+    {
+        printk("cwc:%s: pre panic for unknown op %d\n", __FUNCTION__, operation);
         panic("unknown op %d\n", operation);
+    }
     }
+    printk("cwc:%s: mid\n", __FUNCTION__);
     /* Fill out a communications ring structure.
      */
     req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
@@ -892,6 +912,7 @@
 
     blk_ring.req_prod_pvt++;
 
+    printk("cwc:%s: out buf:%lx\n", __FUNCTION__, buffer_ma);
     return 0;
 }
@@ -1051,6 +1072,7 @@
     ASSERT( ref != -ENOSPC );
     gnttab_grant_foreign_access_ref( ref, rdomid,
                                      address >> PAGE_SHIFT, 0 );
+    printk("cwc:%s: grant access to buf:%lx\n", __FUNCTION__, address >> PAGE_SHIFT);
 
     req->frame_and_sects[0] = (((u32) ref) << 16) | 7;
@@ -1062,6 +1084,7 @@
 {
     unsigned long flags, id;
     blkif_request_t *req_d;
+    printk("cwc:%s: in\n", __FUNCTION__);
 
 retry:
     while ( RING_FULL(&blk_ring) )
@@ -1070,9 +1093,12 @@
         schedule_timeout(1);
     }
 
+    printk("cwc:%s: pre spin_lock_irqsave\n", __FUNCTION__);
     spin_lock_irqsave(&blkif_io_lock, flags);
+    printk("cwc:%s: post spin_lock_irqsave\n", __FUNCTION__);
     if ( RING_FULL(&blk_ring) )
     {
+        printk("cwc:%s: spin_unlock_irqrestore\n", __FUNCTION__);
         spin_unlock_irqrestore(&blkif_io_lock, flags);
         goto retry;
     }
@@ -1090,6 +1116,7 @@
 
     blk_ring.req_prod_pvt++;
     flush_requests();
+    printk("cwc:%s: spin_unlock_irqrestore 2\n", __FUNCTION__);
     spin_unlock_irqrestore(&blkif_io_lock, flags);
 
     while ( !blkif_control_rsp_valid )
@@ -1100,6 +1127,7 @@
 
     memcpy(rsp, &blkif_control_rsp, sizeof(*rsp));
     blkif_control_rsp_valid = 0;
+    printk("cwc:%s: out\n", __FUNCTION__);
 }
 
@@ -1115,6 +1143,7 @@
     msg->status = (ok ? BLKIF_DRIVER_STATUS_UP
                       : BLKIF_DRIVER_STATUS_DOWN);
 
+    printk("cwc:%s: ctrl_if_send_message_block\n", __FUNCTION__);
     ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
 }
@@ -1131,16 +1160,20 @@
     msg->handle      = 0;
     msg->shmem_frame = (virt_to_machine(blk_ring.sring) >> PAGE_SHIFT);
 
+    printk("cwc:%s: ctrl_if_send_message_block\n", __FUNCTION__);
     ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
 }
 
 static void blkif_free(void)
 {
     /* Prevent new requests being issued until we fix things up. */
+    printk("cwc:%s: pre spin_lock_irq\n", __FUNCTION__);
     spin_lock_irq(&blkif_io_lock);
+    printk("cwc:%s: post spin_lock_irq\n", __FUNCTION__);
     recovery = 1;
     blkif_state = BLKIF_STATE_DISCONNECTED;
     spin_unlock_irq(&blkif_io_lock);
+    printk("cwc:%s: spin_unlock_irq\n", __FUNCTION__);
 
     /* Free resources associated with old device channel. */
     if ( blk_ring.sring != NULL )
@@ -1153,16 +1186,19 @@
     unbind_evtchn_from_irq(blkif_evtchn);
     blkif_evtchn = 0;
+    printk("cwc:%s: out\n", __FUNCTION__);
 }
 
 static void blkif_close(void)
 {
+    printk("cwc:%s: called\n", __FUNCTION__);
 }
 
 /* Move from CLOSED to DISCONNECTED state. */
 static void blkif_disconnect(void)
 {
     blkif_sring_t *sring;
+    printk("cwc:%s: called\n", __FUNCTION__);
 
     if ( blk_ring.sring != NULL )
         free_page((unsigned long)blk_ring.sring);
@@ -1176,6 +1212,7 @@
 
 static void blkif_reset(void)
 {
+    printk("cwc:%s: called\n", __FUNCTION__);
     blkif_free();
     blkif_disconnect();
 }
@@ -1184,6 +1221,7 @@
 {
     int i;
     blkif_request_t *req;
+    printk("cwc:%s: in\n", __FUNCTION__);
 
     /* Hmm, requests might be re-ordered when we re-issue them.
      * This will need to be fixed once we have barriers */
@@ -1199,6 +1237,7 @@
             blk_ring.req_prod_pvt++;
         }
     }
+    printk("cwc:%s: post 1\n", __FUNCTION__);
 
     /* Stage 2 : Set up shadow list. */
     for ( i = 0; i < blk_ring.req_prod_pvt; i++ )
@@ -1208,12 +1247,14 @@
         req->id = i;
         translate_req_to_pfn(&rec_ring[i], req);
     }
+    printk("cwc:%s: post 2\n", __FUNCTION__);
 
     /* Stage 3 : Set up free list.
      */
     for ( ; i < BLK_RING_SIZE; i++ )
         rec_ring[i].id = i+1;
     rec_ring_free = blk_ring.req_prod_pvt;
     rec_ring[BLK_RING_SIZE-1].id = 0x0fffffff;
+    printk("cwc:%s: post 3\n", __FUNCTION__);
     /* blk_ring->req_prod will be set when we flush_requests().*/
     wmb();
@@ -1229,6 +1270,7 @@
 
     /* Now safe to left other peope use interface. */
     blkif_state = BLKIF_STATE_CONNECTED;
+    printk("cwc:%s: out\n", __FUNCTION__);
 }
 
 static void blkif_connect(blkif_fe_interface_status_t *status)
@@ -1240,6 +1282,7 @@
 #ifdef CONFIG_XEN_BLKDEV_GRANT
     rdomid = status->domid;
 #endif
+    printk("cwc:%s: in\n", __FUNCTION__);
 
     err = request_irq(blkif_irq, blkif_int, SA_SAMPLE_RANDOM, "blkif", NULL);
     if ( err )
@@ -1250,6 +1293,7 @@
 
     if ( recovery )
     {
+        printk("cwc:%s: recovery\n", __FUNCTION__);
         blkif_recover();
     }
     else
@@ -1257,15 +1301,19 @@
         /* Transition to connected in case we need to do
          * a partition probe on a whole disk. */
         blkif_state = BLKIF_STATE_CONNECTED;
+        printk("cwc:%s: connected\n", __FUNCTION__);
 
         /* Probe for discs attached to the interface. */
         xlvbd_init();
     }
 
     /* Kick pending requests. */
+    printk("cwc:%s: pre spin_lock_irq\n", __FUNCTION__);
     spin_lock_irq(&blkif_io_lock);
+    printk("cwc:%s: post spin_lock_irq\n", __FUNCTION__);
     kick_pending_request_queues();
     spin_unlock_irq(&blkif_io_lock);
+    printk("cwc:%s: spin_unlock_irq\n", __FUNCTION__);
 }
 
 static void unexpected(blkif_fe_interface_status_t *status)
@@ -1282,6 +1330,7 @@
         unexpected(status);
         return;
     }
+    printk("cwc:%s: status: %u\n", __FUNCTION__, status->status);
 
     switch ( status->status )
     {
@@ -1353,6 +1402,7 @@
 
 static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
 {
+    printk("cwc:%s: called\n", __FUNCTION__);
     switch ( msg->subtype )
     {
     case CMSG_BLKIF_FE_INTERFACE_STATUS:
@@ -1371,6 +1421,7 @@
 {
     int err = 0;
     int i;
+    printk("cwc:%s: in\n", __FUNCTION__);
 
     send_driver_status(1);
     /*
@@ -1389,6 +1440,7 @@
         printk(KERN_INFO "xen_blk: Timeout connecting to device!\n");
         err = -ENOSYS;
     }
+    printk("cwc:%s: out\n", __FUNCTION__);
     return err;
 }
 
@@ -1424,16 +1476,19 @@
 
 void blkdev_suspend(void)
 {
+    printk("cwc:%s: called\n", __FUNCTION__);
 }
 
 void blkdev_resume(void)
 {
+    printk("cwc:%s: called\n", __FUNCTION__);
     send_driver_status(1);
 }
 
 void blkif_completion(blkif_request_t *req)
 {
     int i;
+    printk("cwc:%s: in\n", __FUNCTION__);
 
 #ifdef CONFIG_XEN_BLKDEV_GRANT
     grant_ref_t gref;
@@ -1456,4 +1511,5 @@
         break;
     }
 #endif
+    printk("cwc:%s: out\n", __FUNCTION__);
 }
===== xen/common/grant_table.c 1.33 vs edited =====
--- 1.33/xen/common/grant_table.c	2005-04-14 13:53:23 -07:00
+++ edited/xen/common/grant_table.c	2005-04-17 22:18:32 -07:00
@@ -22,8 +22,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
-#define GRANT_DEBUG 0
-#define GRANT_DEBUG_VERBOSE 0
+#define GRANT_DEBUG 1
+#define GRANT_DEBUG_VERBOSE 1
 
 #include 
 #include 
@@ -104,7 +104,9 @@
     act = &granting_d->grant_table->active[ref];
     sha = &granting_d->grant_table->shared[ref];
 
+    printk("cwc:%s: pre spin_lock\n", __FUNCTION__);
     spin_lock(&granting_d->grant_table->lock);
+    printk("cwc:%s: post spin_lock\n", __FUNCTION__);
 
     if ( act->pin == 0 )
     {
@@ -248,6 +250,7 @@
      */
     spin_unlock(&granting_d->grant_table->lock);
+    printk("cwc:%s: spin_unlock\n", __FUNCTION__);
 
     if ( (host_virt_addr != 0) && (dev_hst_ro_flags & GNTMAP_host_map) )
     {
@@ -269,7 +272,9 @@
         {
             /* Abort.
*/ + printk("cwc:%s: pre spin_lock 2\n", __FUNCTION__); spin_lock(&granting_d->grant_table->lock); + printk("cwc:%s: post spin_lock 2\n", __FUNCTION__); if ( dev_hst_ro_flags & GNTMAP_readonly ) act->pin -= GNTPIN_hstr_inc; @@ -289,14 +294,17 @@ } spin_unlock(&granting_d->grant_table->lock); + printk("cwc:%s: spin_unlock 2\n", __FUNCTION__); } } *pframe = frame; + printk("cwc:%s: out\n", __FUNCTION__); return rc; unlock_out: spin_unlock(&granting_d->grant_table->lock); + printk("cwc:%s: spin_unlock out\n", __FUNCTION__); return rc; } @@ -507,7 +515,9 @@ act = &rd->grant_table->active[ref]; sha = &rd->grant_table->shared[ref]; + printk("cwc:%s: pre spin_lock\n", __FUNCTION__); spin_lock(&rd->grant_table->lock); + printk("cwc:%s: post spin_lock\n", __FUNCTION__); if ( frame == 0 ) frame = act->frame; @@ -562,6 +572,7 @@ goto unmap_out; } + printk("cwc:%s: pre pagetable delete\n", __FUNCTION__); /* Delete pagetable entry */ if ( unlikely(__put_user(0, (unsigned long *)pl1e))) @@ -588,6 +599,7 @@ rc = 0; *va = virt; + printk("cwc:%s: post pagetable delete\n", __FUNCTION__); } if ( (map->ref_and_flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) @@ -617,6 +629,7 @@ unmap_out: (void)__put_user(rc, &uop->status); + printk("cwc:%s: out spin_unlock\n", __FUNCTION__); spin_unlock(&rd->grant_table->lock); put_domain(rd); return rc; @@ -897,11 +910,14 @@ ref = (map->ref_and_flags >> MAPTRACK_REF_SHIFT); act = &rgt->active[ref]; + printk("cwc:%s: pre spin_lock\n", __FUNCTION__); spin_lock(&rgt->lock); + printk("cwc:%s: post spin_lock\n", __FUNCTION__); if ( act->frame != frame ) { spin_unlock(&rgt->lock); + printk("cwc:%s: not frame spin_unlock\n", __FUNCTION__); continue; } @@ -910,6 +926,7 @@ if ( refcount == 0 ) { spin_unlock(&rgt->lock); + printk("cwc:%s: refcount 0 spin_unlock\n", __FUNCTION__); continue; } @@ -937,6 +954,7 @@ put_page(&frame_table[frame]); } spin_unlock(&rgt->lock); + printk("cwc:%s: spin_unlock\n", __FUNCTION__); clear_bit(GNTMAP_host_map, &map->ref_and_flags); @@ -949,6 +967,7 @@ } put_domain(rd); + printk("cwc:%s: out found:%d\n", __FUNCTION__, found); return found; } @@ -974,13 +993,16 @@ return 0; } + printk("cwc:%s: pre spin_lock\n", __FUNCTION__); spin_lock(&rgt->lock); + printk("cwc:%s: post spin_lock\n", __FUNCTION__); sha = &rgt->shared[ref]; sflags = sha->flags; sdom = sha->domid; + printk("cwc:%s: pre cmpxchg loop\n", __FUNCTION__); for ( ; ; ) { target_pfn = sha->frame; @@ -1028,10 +1050,12 @@ } spin_unlock(&rgt->lock); + printk("cwc:%s: out spin_unlock 1\n", __FUNCTION__); return 1; fail: spin_unlock(&rgt->lock); + printk("cwc:%s: out spin_unlock 0\n", __FUNCTION__); return 0; }