/* Debug hook: remembers the data buffer of the first segment we queue so
 * its contents can be dumped before and after the backend completes it. */
volatile unsigned char *test_data = NULL;
static int blkif_queue_request(struct request *req)
{
struct blkfront_info *info = req->rq_disk->private_data;
unsigned long buffer_mfn;
blkif_request_t *ring_req;
struct bio *bio;
struct bio_vec *bvec;
int idx;
unsigned long id;
unsigned int fsect, lsect;
int ref;
grant_ref_t gref_head;
int i=0;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1;
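/* Reserve one grant reference per possible segment; if the grant table is
 * exhausted, register a callback so the queue is restarted once references
 * are freed. */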
if (gnttab_alloc_grant_references(
BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
gnttab_request_free_callback(
&info->callback,
blkif_restart_queue_callback,
info,
BLKIF_MAX_SEGMENTS_PER_REQUEST);
return 1;
}
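/* Fill out a communications ring structure. */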
ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
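/* Pick a free shadow slot; its index doubles as the request id that the
 * backend echoes back in its response. */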
id = GET_ID_FROM_FREELIST(info);
info->shadow[id].request = (unsigned long)req;
ring_req->id = id;
/* Debug override: always target sector 0 (the stock driver uses
 * req->sector here). */
ring_req->sector_number = (blkif_sector_t)0;
ring_req->handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
if (blk_barrier_rq(req))
ring_req->operation = BLKIF_OP_WRITE_BARRIER;
ring_req->nr_segments = 0;
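/* Walk every segment of every bio in the request, granting the backend
 * access to each data page and describing the segment in the ring slot. */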
rq_for_each_bio (bio, req) {
bio_for_each_segment (bvec, bio, idx) {
BUG_ON(ring_req->nr_segments
== BLKIF_MAX_SEGMENTS_PER_REQUEST);
buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
fsect = bvec->bv_offset >> 9;
lsect = fsect + (bvec->bv_len >> 9) - 1;
/* install a grant reference. */
ref = gnttab_claim_grant_reference(&gref_head);
BUG_ON(ref == -ENOSPC);
gnttab_grant_foreign_access_ref(
ref,
info->xbdev->otherend_id,
buffer_mfn,
rq_data_dir(req) );
info->shadow[id].frame[ring_req->nr_segments] =
mfn_to_pfn(buffer_mfn);
ring_req->seg[ring_req->nr_segments] =
(struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
.last_sect = lsect };
ring_req->nr_segments++;
/* Debug shortcut: stop after the first segment; the remaining segments
 * of the request are never submitted. */
if (ring_req->nr_segments == 1)
goto end;
}
}
end:
/* Debug: on the first request, remember where its data buffer lives and
 * dump the first 256 bytes before the backend has written anything. */
if (!test_data) {
/* bvec->bv_page is a struct page *, not a virtual address; map it with
 * page_address() (this assumes the page is not a highmem page). */
test_data = (volatile unsigned char *)page_address(bvec->bv_page)
+ bvec->bv_offset;
printk("buffer_mfn = %lu, buffer address = %p\n",
buffer_mfn, test_data);
for (i = 0; i < 256; i++) {
printk("%02x ", test_data[i]);
if (i % 16 == 15)
printk("\n");
}
}
info->ring.req_prod_pvt++;
/* Keep a private copy so we can reissue requests when recovering. */
info->shadow[id].req = *ring_req;
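/* Return any reserved grant references that were not claimed above. */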
gnttab_free_grant_references(gref_head);
return 0;
}
static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
struct request *req;
blkif_response_t *bret;
RING_IDX i, rp;
unsigned long flags;
struct blkfront_info *info = (struct blkfront_info *)dev_id;
int uptodate;
spin_lock_irqsave(&blkif_io_lock, flags);
if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
spin_unlock_irqrestore(&blkif_io_lock, flags);
return IRQ_HANDLED;
}
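/* Consume every response the backend has produced since we last looked. */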
again:
rp = info->ring.sring->rsp_prod;
rmb(); /* Ensure we see queued responses up to 'rp'. */
for (i = info->ring.rsp_cons; i != rp; i++) {
unsigned long id;
int ret;
bret = RING_GET_RESPONSE(&info->ring, i);
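/* The response id is the shadow-slot index chosen in blkif_queue_request;
 * use it to recover the original struct request. */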
id = bret->id;
req = (struct request *)info->shadow[id].request;
blkif_completion(&info->shadow[id]);
ADD_ID_TO_FREELIST(info, id);
uptodate = (bret->status == BLKIF_RSP_OKAY);
switch (bret->operation) {
case BLKIF_OP_WRITE_BARRIER:
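/* EOPNOTSUPP from a barrier write means the backend cannot handle write
 * barriers: disable the feature and fail this request. */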
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
printk("blkfront: %s: write barrier op failed\n",
info->gd->disk_name);
uptodate = -EOPNOTSUPP;
info->feature_barrier = 0;
xlvbd_barrier(info);
}
/* fall through */
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
if (unlikely(bret->status != BLKIF_RSP_OKAY))
DPRINTK("Bad return from blkdev data "
"request: %x\n", bret->status);
ret = end_that_request_first(req, uptodate,
req->hard_nr_sectors);
BUG_ON(ret);
end_that_request_last(req, uptodate);
break;
default:
BUG();
}
}
info->ring.rsp_cons = i;
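/* If requests are still outstanding, check whether more responses arrived
 * while we were processing; otherwise arm rsp_event for the next response. */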
if (i != info->ring.req_prod_pvt) {
int more_to_do;
RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
if (more_to_do)
goto again;
} else
info->ring.sring->rsp_event = i + 1;
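/* Ring slots and grant references have been released; restart the block
 * queue if it was stopped for lack of space. */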
kick_pending_request_queues(info);
spin_unlock_irqrestore(&blkif_io_lock, flags);
printk("blk int\n");
for(i=0;i<256;i++){
printk("%02x ", *(test_data + i));
if(i%16 == 15)
printk("\n");
}
while(1);
return IRQ_HANDLED;
}