# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID d7c794130ac54f2d9e06e792361ba62fc92cfb4f
# Parent 4cff74aa624664beeed306a199200ddfbcb51f95
Indentation cleanups in linux driver code.
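
The bulk of the change converts the old Xen coding style (spaces just
inside parentheses, braces on their own lines) to standard Linux kernel
style. A minimal before/after sketch of the pattern being applied — the
queue type here is a stand-in for illustration only, not the real
request_queue_t API:

    struct queue {
            void (*unplug_fn)(struct queue *q);
    };

    /* Before: old Xen style. */
    static void flush_old(struct queue *q)
    {
        if ( q != NULL )
        {
            if ( q->unplug_fn != NULL )
                q->unplug_fn(q);
        }
    }

    /* After: Linux style (tabs, cuddled braces, no inner spaces). */
    static void flush_new(struct queue *q)
    {
            if (q != NULL) {
                    if (q->unplug_fn != NULL)
                            q->unplug_fn(q);
            }
    }
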
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c Thu Sep 22 13:04:14 2005
@@ -28,12 +28,12 @@
#define BATCH_PER_DOMAIN 16
static unsigned long mmap_vstart;
-#define MMAP_PAGES \
- (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
-#define MMAP_VADDR(_req,_seg) \
- (mmap_vstart + \
- ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \
- ((_seg) * PAGE_SIZE))
+#define MMAP_PAGES \
+ (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
+#define MMAP_VADDR(_req,_seg) \
+ (mmap_vstart + \
+ ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \
+ ((_seg) * PAGE_SIZE))
/*
* Each outstanding request that we've passed to the lower device layers has a
@@ -42,12 +42,12 @@
* response queued for it, with the saved 'id' passed back.
*/
typedef struct {
- blkif_t *blkif;
- unsigned long id;
- int nr_pages;
- atomic_t pendcnt;
- unsigned short operation;
- int status;
+ blkif_t *blkif;
+ unsigned long id;
+ int nr_pages;
+ atomic_t pendcnt;
+ unsigned short operation;
+ int status;
} pending_req_t;
/*
@@ -68,14 +68,13 @@
static request_queue_t *plugged_queue;
static inline void flush_plugged_queue(void)
{
- request_queue_t *q = plugged_queue;
- if ( q != NULL )
- {
- if ( q->unplug_fn != NULL )
- q->unplug_fn(q);
- blk_put_queue(q);
- plugged_queue = NULL;
- }
+ request_queue_t *q = plugged_queue;
+ if (q != NULL) {
+ if (q->unplug_fn != NULL)
+ q->unplug_fn(q);
+ blk_put_queue(q);
+ plugged_queue = NULL;
+ }
}
/* When using grant tables to map a frame for device access then the
@@ -106,24 +105,23 @@
static void fast_flush_area(int idx, int nr_pages)
{
- struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
- unsigned int i, invcount = 0;
- u16 handle;
-
- for ( i = 0; i < nr_pages; i++ )
- {
- if ( BLKBACK_INVALID_HANDLE != ( handle = pending_handle(idx, i) ) )
- {
- unmap[i].host_addr = MMAP_VADDR(idx, i);
- unmap[i].dev_bus_addr = 0;
- unmap[i].handle = handle;
- pending_handle(idx, i) = BLKBACK_INVALID_HANDLE;
- invcount++;
- }
- }
- if ( unlikely(HYPERVISOR_grant_table_op(
- GNTTABOP_unmap_grant_ref, unmap, invcount)))
- BUG();
+ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ unsigned int i, invcount = 0;
+ u16 handle;
+
+ for (i = 0; i < nr_pages; i++) {
+ handle = pending_handle(idx, i);
+ if (handle == BLKBACK_INVALID_HANDLE)
+ continue;
+ unmap[i].host_addr = MMAP_VADDR(idx, i);
+ unmap[i].dev_bus_addr = 0;
+ unmap[i].handle = handle;
+ pending_handle(idx, i) = BLKBACK_INVALID_HANDLE;
+ invcount++;
+ }
+
+ BUG_ON(HYPERVISOR_grant_table_op(
+ GNTTABOP_unmap_grant_ref, unmap, invcount));
}
@@ -136,34 +134,38 @@
static int __on_blkdev_list(blkif_t *blkif)
{
- return blkif->blkdev_list.next != NULL;
+ return blkif->blkdev_list.next != NULL;
}
static void remove_from_blkdev_list(blkif_t *blkif)
{
- unsigned long flags;
- if ( !__on_blkdev_list(blkif) ) return;
- spin_lock_irqsave(&blkio_schedule_list_lock, flags);
- if ( __on_blkdev_list(blkif) )
- {
- list_del(&blkif->blkdev_list);
- blkif->blkdev_list.next = NULL;
- blkif_put(blkif);
- }
- spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+ unsigned long flags;
+
+ if (!__on_blkdev_list(blkif))
+ return;
+
+ spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+ if (__on_blkdev_list(blkif)) {
+ list_del(&blkif->blkdev_list);
+ blkif->blkdev_list.next = NULL;
+ blkif_put(blkif);
+ }
+ spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
}
static void add_to_blkdev_list_tail(blkif_t *blkif)
{
- unsigned long flags;
- if ( __on_blkdev_list(blkif) ) return;
- spin_lock_irqsave(&blkio_schedule_list_lock, flags);
- if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
- {
- list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
- blkif_get(blkif);
- }
- spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+ unsigned long flags;
+
+ if (__on_blkdev_list(blkif))
+ return;
+
+ spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+ if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) {
+ list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
+ blkif_get(blkif);
+ }
+ spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
}
@@ -175,54 +177,53 @@
static int blkio_schedule(void *arg)
{
- DECLARE_WAITQUEUE(wq, current);
-
- blkif_t *blkif;
- struct list_head *ent;
-
- daemonize("xenblkd");
-
- for ( ; ; )
- {
- /* Wait for work to do. */
- add_wait_queue(&blkio_schedule_wait, &wq);
- set_current_state(TASK_INTERRUPTIBLE);
- if ( (NR_PENDING_REQS == MAX_PENDING_REQS) ||
- list_empty(&blkio_schedule_list) )
- schedule();
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&blkio_schedule_wait, &wq);
-
- /* Queue up a batch of requests. */
- while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
- !list_empty(&blkio_schedule_list) )
- {
- ent = blkio_schedule_list.next;
- blkif = list_entry(ent, blkif_t, blkdev_list);
- blkif_get(blkif);
- remove_from_blkdev_list(blkif);
- if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
- add_to_blkdev_list_tail(blkif);
- blkif_put(blkif);
- }
-
- /* Push the batch through to disc. */
- flush_plugged_queue();
- }
+ DECLARE_WAITQUEUE(wq, current);
+
+ blkif_t *blkif;
+ struct list_head *ent;
+
+ daemonize("xenblkd");
+
+ for (;;) {
+ /* Wait for work to do. */
+ add_wait_queue(&blkio_schedule_wait, &wq);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if ((NR_PENDING_REQS == MAX_PENDING_REQS) ||
+ list_empty(&blkio_schedule_list))
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&blkio_schedule_wait, &wq);
+
+ /* Queue up a batch of requests. */
+ while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
+ !list_empty(&blkio_schedule_list)) {
+ ent = blkio_schedule_list.next;
+ blkif = list_entry(ent, blkif_t, blkdev_list);
+ blkif_get(blkif);
+ remove_from_blkdev_list(blkif);
+ if (do_block_io_op(blkif, BATCH_PER_DOMAIN))
+ add_to_blkdev_list_tail(blkif);
+ blkif_put(blkif);
+ }
+
+ /* Push the batch through to disc. */
+ flush_plugged_queue();
+ }
}
static void maybe_trigger_blkio_schedule(void)
{
- /*
- * Needed so that two processes, who together make the following predicate
- * true, don't both read stale values and evaluate the predicate
- * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
- */
- smp_mb();
-
- if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
- !list_empty(&blkio_schedule_list) )
- wake_up(&blkio_schedule_wait);
+ /*
+ * Needed so that two processes, which together make the following
+ * predicate true, don't both read stale values and evaluate the
+ * predicate incorrectly. Incredibly unlikely to stall the scheduler
+ * on x86, but...
+ */
+ smp_mb();
+
+ if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
+ !list_empty(&blkio_schedule_list))
+ wake_up(&blkio_schedule_wait);
}
@@ -233,36 +234,34 @@
static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
{
- unsigned long flags;
-
- /* An error fails the entire request. */
- if ( !uptodate )
- {
- DPRINTK("Buffer not up-to-date at end of operation\n");
- pending_req->status = BLKIF_RSP_ERROR;
- }
-
- if ( atomic_dec_and_test(&pending_req->pendcnt) )
- {
- int pending_idx = pending_req - pending_reqs;
- fast_flush_area(pending_idx, pending_req->nr_pages);
- make_response(pending_req->blkif, pending_req->id,
- pending_req->operation, pending_req->status);
- blkif_put(pending_req->blkif);
- spin_lock_irqsave(&pend_prod_lock, flags);
- pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
- spin_unlock_irqrestore(&pend_prod_lock, flags);
- maybe_trigger_blkio_schedule();
- }
+ unsigned long flags;
+
+ /* An error fails the entire request. */
+ if (!uptodate) {
+ DPRINTK("Buffer not up-to-date at end of operation\n");
+ pending_req->status = BLKIF_RSP_ERROR;
+ }
+
+ if (atomic_dec_and_test(&pending_req->pendcnt)) {
+ int pending_idx = pending_req - pending_reqs;
+ fast_flush_area(pending_idx, pending_req->nr_pages);
+ make_response(pending_req->blkif, pending_req->id,
+ pending_req->operation, pending_req->status);
+ blkif_put(pending_req->blkif);
+ spin_lock_irqsave(&pend_prod_lock, flags);
+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+ spin_unlock_irqrestore(&pend_prod_lock, flags);
+ maybe_trigger_blkio_schedule();
+ }
}
static int end_block_io_op(struct bio *bio, unsigned int done, int error)
{
- if ( bio->bi_size != 0 )
- return 1;
- __end_block_io_op(bio->bi_private, !error);
- bio_put(bio);
- return error;
+ if (bio->bi_size != 0)
+ return 1;
+ __end_block_io_op(bio->bi_private, !error);
+ bio_put(bio);
+ return error;
}
@@ -272,10 +271,10 @@
irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
- blkif_t *blkif = dev_id;
- add_to_blkdev_list_tail(blkif);
- maybe_trigger_blkio_schedule();
- return IRQ_HANDLED;
+ blkif_t *blkif = dev_id;
+ add_to_blkdev_list_tail(blkif);
+ maybe_trigger_blkio_schedule();
+ return IRQ_HANDLED;
}
@@ -286,183 +285,174 @@
static int do_block_io_op(blkif_t *blkif, int max_to_do)
{
- blkif_back_ring_t *blk_ring = &blkif->blk_ring;
- blkif_request_t *req;
- RING_IDX i, rp;
- int more_to_do = 0;
-
- rp = blk_ring->sring->req_prod;
- rmb(); /* Ensure we see queued requests up to 'rp'. */
-
- for ( i = blk_ring->req_cons;
- (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
- i++ )
- {
- if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
- {
- more_to_do = 1;
- break;
- }
+ blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+ blkif_request_t *req;
+ RING_IDX i, rp;
+ int more_to_do = 0;
+
+ rp = blk_ring->sring->req_prod;
+ rmb(); /* Ensure we see queued requests up to 'rp'. */
+
+ for (i = blk_ring->req_cons;
+ (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
+ i++) {
+ if ((max_to_do-- == 0) ||
+ (NR_PENDING_REQS == MAX_PENDING_REQS)) {
+ more_to_do = 1;
+ break;
+ }
- req = RING_GET_REQUEST(blk_ring, i);
- switch ( req->operation )
- {
- case BLKIF_OP_READ:
- case BLKIF_OP_WRITE:
- dispatch_rw_block_io(blkif, req);
- break;
-
- default:
- DPRINTK("error: unknown block io operation [%d]\n",
- req->operation);
- make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
- break;
- }
- }
-
- blk_ring->req_cons = i;
- return more_to_do;
+ req = RING_GET_REQUEST(blk_ring, i);
+ switch (req->operation) {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ dispatch_rw_block_io(blkif, req);
+ break;
+
+ default:
+ DPRINTK("error: unknown block io operation [%d]\n",
+ req->operation);
+ make_response(blkif, req->id, req->operation,
+ BLKIF_RSP_ERROR);
+ break;
+ }
+ }
+
+ blk_ring->req_cons = i;
+ return more_to_do;
}
static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
{
- extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
- int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
- unsigned long fas = 0;
- int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
- pending_req_t *pending_req;
- struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
- struct phys_req preq;
- struct {
- unsigned long buf; unsigned int nsec;
- } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
- unsigned int nseg;
- struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
- int nbio = 0;
- request_queue_t *q;
-
- /* Check that number of segments is sane. */
- nseg = req->nr_segments;
- if ( unlikely(nseg == 0) ||
- unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
- {
- DPRINTK("Bad number of segments in request (%d)\n", nseg);
- goto bad_descriptor;
- }
-
- preq.dev = req->handle;
- preq.sector_number = req->sector_number;
- preq.nr_sects = 0;
-
- for ( i = 0; i < nseg; i++ )
- {
- fas = req->frame_and_sects[i];
- seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
-
- if ( seg[i].nsec <= 0 )
- goto bad_descriptor;
- preq.nr_sects += seg[i].nsec;
-
- map[i].host_addr = MMAP_VADDR(pending_idx, i);
- map[i].dom = blkif->domid;
- map[i].ref = blkif_gref_from_fas(fas);
- map[i].flags = GNTMAP_host_map;
- if ( operation == WRITE )
- map[i].flags |= GNTMAP_readonly;
- }
-
- if ( unlikely(HYPERVISOR_grant_table_op(
- GNTTABOP_map_grant_ref, map, nseg)))
- BUG();
-
- for ( i = 0; i < nseg; i++ )
- {
- if ( unlikely(map[i].handle < 0) )
- {
- DPRINTK("invalid buffer -- could not remap it\n");
- fast_flush_area(pending_idx, nseg);
- goto bad_descriptor;
- }
-
- phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
- FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
-
- pending_handle(pending_idx, i) = map[i].handle;
- }
-
- for ( i = 0; i < nseg; i++ )
- {
- fas = req->frame_and_sects[i];
- seg[i].buf = map[i].dev_bus_addr | (blkif_first_sect(fas) << 9);
- }
-
- if ( vbd_translate(&preq, blkif, operation) != 0 )
- {
- DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
- operation == READ ? "read" : "write", preq.sector_number,
- preq.sector_number + preq.nr_sects, preq.dev);
- goto bad_descriptor;
- }
-
- pending_req = &pending_reqs[pending_idx];
- pending_req->blkif = blkif;
- pending_req->id = req->id;
- pending_req->operation = operation;
- pending_req->status = BLKIF_RSP_OKAY;
- pending_req->nr_pages = nseg;
-
- for ( i = 0; i < nseg; i++ )
- {
- if ( ((int)preq.sector_number|(int)seg[i].nsec) &
- ((bdev_hardsect_size(preq.bdev) >> 9) - 1) )
- {
- DPRINTK("Misaligned I/O request from domain %d", blkif->domid);
- goto cleanup_and_fail;
- }
-
- while ( (bio == NULL) ||
- (bio_add_page(bio,
- virt_to_page(MMAP_VADDR(pending_idx, i)),
- seg[i].nsec << 9,
- seg[i].buf & ~PAGE_MASK) == 0) )
- {
- bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
- if ( unlikely(bio == NULL) )
- {
- cleanup_and_fail:
- for ( i = 0; i < (nbio-1); i++ )
- bio_put(biolist[i]);
- fast_flush_area(pending_idx, nseg);
- goto bad_descriptor;
- }
+ extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
+ int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
+ unsigned long fas = 0;
+ int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+ pending_req_t *pending_req;
+ struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct phys_req preq;
+ struct {
+ unsigned long buf; unsigned int nsec;
+ } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ unsigned int nseg;
+ struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ int nbio = 0;
+ request_queue_t *q;
+
+ /* Check that number of segments is sane. */
+ nseg = req->nr_segments;
+ if (unlikely(nseg == 0) ||
+ unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+ DPRINTK("Bad number of segments in request (%d)\n", nseg);
+ goto bad_descriptor;
+ }
+
+ preq.dev = req->handle;
+ preq.sector_number = req->sector_number;
+ preq.nr_sects = 0;
+
+ for (i = 0; i < nseg; i++) {
+ fas = req->frame_and_sects[i];
+ seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
+
+ if (seg[i].nsec <= 0)
+ goto bad_descriptor;
+ preq.nr_sects += seg[i].nsec;
+
+ map[i].host_addr = MMAP_VADDR(pending_idx, i);
+ map[i].dom = blkif->domid;
+ map[i].ref = blkif_gref_from_fas(fas);
+ map[i].flags = GNTMAP_host_map;
+ if (operation == WRITE)
+ map[i].flags |= GNTMAP_readonly;
+ }
+
+ BUG_ON(HYPERVISOR_grant_table_op(
+ GNTTABOP_map_grant_ref, map, nseg));
+
+ for (i = 0; i < nseg; i++) {
+ if (unlikely(map[i].handle < 0)) {
+ DPRINTK("invalid buffer -- could not remap it\n");
+ fast_flush_area(pending_idx, nseg);
+ goto bad_descriptor;
+ }
+
+ phys_to_machine_mapping[__pa(MMAP_VADDR(
+ pending_idx, i)) >> PAGE_SHIFT] =
+ FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
+
+ pending_handle(pending_idx, i) = map[i].handle;
+ }
+
+ for (i = 0; i < nseg; i++) {
+ fas = req->frame_and_sects[i];
+ seg[i].buf = map[i].dev_bus_addr |
+ (blkif_first_sect(fas) << 9);
+ }
+
+ if (vbd_translate(&preq, blkif, operation) != 0) {
+ DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
+ operation == READ ? "read" : "write",
+ preq.sector_number,
+ preq.sector_number + preq.nr_sects, preq.dev);
+ goto bad_descriptor;
+ }
+
+ pending_req = &pending_reqs[pending_idx];
+ pending_req->blkif = blkif;
+ pending_req->id = req->id;
+ pending_req->operation = operation;
+ pending_req->status = BLKIF_RSP_OKAY;
+ pending_req->nr_pages = nseg;
+
+ for (i = 0; i < nseg; i++) {
+ if (((int)preq.sector_number|(int)seg[i].nsec) &
+ ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
+ DPRINTK("Misaligned I/O request from domain %d",
+ blkif->domid);
+ goto cleanup_and_fail;
+ }
+
+ while ((bio == NULL) ||
+ (bio_add_page(bio,
+ virt_to_page(MMAP_VADDR(pending_idx, i)),
+ seg[i].nsec << 9,
+ seg[i].buf & ~PAGE_MASK) == 0)) {
+ bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
+ if (unlikely(bio == NULL)) {
+ cleanup_and_fail:
+ for (i = 0; i < (nbio-1); i++)
+ bio_put(biolist[i]);
+ fast_flush_area(pending_idx, nseg);
+ goto bad_descriptor;
+ }
- bio->bi_bdev = preq.bdev;
- bio->bi_private = pending_req;
- bio->bi_end_io = end_block_io_op;
- bio->bi_sector = preq.sector_number;
- }
-
- preq.sector_number += seg[i].nsec;
- }
-
- if ( (q = bdev_get_queue(bio->bi_bdev)) != plugged_queue )
- {
- flush_plugged_queue();
- blk_get_queue(q);
- plugged_queue = q;
- }
-
- atomic_set(&pending_req->pendcnt, nbio);
- pending_cons++;
- blkif_get(blkif);
-
- for ( i = 0; i < nbio; i++ )
- submit_bio(operation, biolist[i]);
-
- return;
+ bio->bi_bdev = preq.bdev;
+ bio->bi_private = pending_req;
+ bio->bi_end_io = end_block_io_op;
+ bio->bi_sector = preq.sector_number;
+ }
+
+ preq.sector_number += seg[i].nsec;
+ }
+
+ if ((q = bdev_get_queue(bio->bi_bdev)) != plugged_queue) {
+ flush_plugged_queue();
+ blk_get_queue(q);
+ plugged_queue = q;
+ }
+
+ atomic_set(&pending_req->pendcnt, nbio);
+ pending_cons++;
+ blkif_get(blkif);
+
+ for (i = 0; i < nbio; i++)
+ submit_bio(operation, biolist[i]);
+
+ return;
bad_descriptor:
- make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
}
@@ -475,66 +465,71 @@
static void make_response(blkif_t *blkif, unsigned long id,
unsigned short op, int st)
{
- blkif_response_t *resp;
- unsigned long flags;
- blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-
- /* Place on the response ring for the relevant domain. */
- spin_lock_irqsave(&blkif->blk_ring_lock, flags);
- resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
- resp->id = id;
- resp->operation = op;
- resp->status = st;
- wmb(); /* Ensure other side can see the response fields. */
- blk_ring->rsp_prod_pvt++;
- RING_PUSH_RESPONSES(blk_ring);
- spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-
- /* Kick the relevant domain. */
- notify_via_evtchn(blkif->evtchn);
+ blkif_response_t *resp;
+ unsigned long flags;
+ blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+
+ /* Place on the response ring for the relevant domain. */
+ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+ resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
+ resp->id = id;
+ resp->operation = op;
+ resp->status = st;
+ wmb(); /* Ensure other side can see the response fields. */
+ blk_ring->rsp_prod_pvt++;
+ RING_PUSH_RESPONSES(blk_ring);
+ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
+
+ /* Kick the relevant domain. */
+ notify_via_evtchn(blkif->evtchn);
}
void blkif_deschedule(blkif_t *blkif)
{
- remove_from_blkdev_list(blkif);
+ remove_from_blkdev_list(blkif);
}
static int __init blkif_init(void)
{
- int i;
- struct page *page;
-
- if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
- !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
- return 0;
-
- blkif_interface_init();
-
- page = balloon_alloc_empty_page_range(MMAP_PAGES);
- BUG_ON(page == NULL);
- mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-
- pending_cons = 0;
- pending_prod = MAX_PENDING_REQS;
- memset(pending_reqs, 0, sizeof(pending_reqs));
- for ( i = 0; i < MAX_PENDING_REQS; i++ )
- pending_ring[i] = i;
+ int i;
+ struct page *page;
+
+ if (!(xen_start_info->flags & SIF_INITDOMAIN) &&
+ !(xen_start_info->flags & SIF_BLK_BE_DOMAIN))
+ return 0;
+
+ blkif_interface_init();
+
+ page = balloon_alloc_empty_page_range(MMAP_PAGES);
+ BUG_ON(page == NULL);
+ mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
+
+ pending_cons = 0;
+ pending_prod = MAX_PENDING_REQS;
+ memset(pending_reqs, 0, sizeof(pending_reqs));
+ for (i = 0; i < MAX_PENDING_REQS; i++)
+ pending_ring[i] = i;
- spin_lock_init(&blkio_schedule_list_lock);
- INIT_LIST_HEAD(&blkio_schedule_list);
-
- if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
- BUG();
-
- blkif_xenbus_init();
-
- memset( pending_grant_handles, BLKBACK_INVALID_HANDLE, MMAP_PAGES );
-
-#ifdef CONFIG_XEN_BLKDEV_TAP_BE
- printk(KERN_ALERT "NOTE: Blkif backend is running with tap support on!\n");
-#endif
-
- return 0;
+ spin_lock_init(&blkio_schedule_list_lock);
+ INIT_LIST_HEAD(&blkio_schedule_list);
+
+ BUG_ON(kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0);
+
+ blkif_xenbus_init();
+
+ memset(pending_grant_handles, BLKBACK_INVALID_HANDLE, MMAP_PAGES);
+
+ return 0;
}
__initcall(blkif_init);
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/blkback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/common.h Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/common.h Thu Sep 22 13:04:14 2005
@@ -31,39 +31,39 @@
#endif
struct vbd {
- blkif_vdev_t handle; /* what the domain refers to this vbd as */
- unsigned char readonly; /* Non-zero -> read-only */
- unsigned char type; /* VDISK_xxx */
- u32 pdevice; /* phys device that this vbd maps to */
- struct block_device *bdev;
+ blkif_vdev_t handle; /* what the domain refers to this vbd as */
+ unsigned char readonly; /* Non-zero -> read-only */
+ unsigned char type; /* VDISK_xxx */
+ u32 pdevice; /* phys device that this vbd maps to */
+ struct block_device *bdev;
};
typedef struct blkif_st {
- /* Unique identifier for this interface. */
- domid_t domid;
- unsigned int handle;
- /* Physical parameters of the comms window. */
- unsigned int evtchn;
- unsigned int remote_evtchn;
- /* Comms information. */
- blkif_back_ring_t blk_ring;
- struct vm_struct *blk_ring_area;
- /* VBDs attached to this interface. */
- struct vbd vbd;
- /* Private fields. */
- enum { DISCONNECTED, CONNECTED } status;
+ /* Unique identifier for this interface. */
+ domid_t domid;
+ unsigned int handle;
+ /* Physical parameters of the comms window. */
+ unsigned int evtchn;
+ unsigned int remote_evtchn;
+ /* Comms information. */
+ blkif_back_ring_t blk_ring;
+ struct vm_struct *blk_ring_area;
+ /* VBDs attached to this interface. */
+ struct vbd vbd;
+ /* Private fields. */
+ enum { DISCONNECTED, CONNECTED } status;
#ifdef CONFIG_XEN_BLKDEV_TAP_BE
- /* Is this a blktap frontend */
- unsigned int is_blktap;
+ /* Is this a blktap frontend */
+ unsigned int is_blktap;
#endif
- struct list_head blkdev_list;
- spinlock_t blk_ring_lock;
- atomic_t refcnt;
+ struct list_head blkdev_list;
+ spinlock_t blk_ring_lock;
+ atomic_t refcnt;
- struct work_struct free_work;
+ struct work_struct free_work;
- u16 shmem_handle;
- grant_ref_t shmem_ref;
+ u16 shmem_handle;
+ grant_ref_t shmem_ref;
} blkif_t;
blkif_t *alloc_blkif(domid_t domid);
@@ -71,11 +71,11 @@
int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
-#define blkif_put(_b) \
- do { \
- if ( atomic_dec_and_test(&(_b)->refcnt) ) \
- free_blkif_callback(_b); \
- } while (0)
+#define blkif_put(_b) \
+ do { \
+ if (atomic_dec_and_test(&(_b)->refcnt)) \
+ free_blkif_callback(_b); \
+ } while (0)
/* Create a vbd. */
int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, u32 pdevice,
@@ -87,10 +87,10 @@
unsigned long vbd_secsize(struct vbd *vbd);
struct phys_req {
- unsigned short dev;
- unsigned short nr_sects;
- struct block_device *bdev;
- blkif_sector_t sector_number;
+ unsigned short dev;
+ unsigned short nr_sects;
+ struct block_device *bdev;
+ blkif_sector_t sector_number;
};
int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation);
@@ -104,3 +104,13 @@
irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
#endif /* __BLKIF__BACKEND__COMMON_H__ */
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/blkback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c Thu Sep 22 13:04:14 2005
@@ -13,134 +13,144 @@
blkif_t *alloc_blkif(domid_t domid)
{
- blkif_t *blkif;
+ blkif_t *blkif;
- blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
- if (!blkif)
- return ERR_PTR(-ENOMEM);
+ blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
+ if (!blkif)
+ return ERR_PTR(-ENOMEM);
- memset(blkif, 0, sizeof(*blkif));
- blkif->domid = domid;
- blkif->status = DISCONNECTED;
- spin_lock_init(&blkif->blk_ring_lock);
- atomic_set(&blkif->refcnt, 1);
+ memset(blkif, 0, sizeof(*blkif));
+ blkif->domid = domid;
+ blkif->status = DISCONNECTED;
+ spin_lock_init(&blkif->blk_ring_lock);
+ atomic_set(&blkif->refcnt, 1);
- return blkif;
+ return blkif;
}
static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
{
- struct gnttab_map_grant_ref op;
+ struct gnttab_map_grant_ref op;
- op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
- op.flags = GNTMAP_host_map;
- op.ref = shared_page;
- op.dom = blkif->domid;
+ op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
+ op.flags = GNTMAP_host_map;
+ op.ref = shared_page;
+ op.dom = blkif->domid;
- lock_vm_area(blkif->blk_ring_area);
- BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
- unlock_vm_area(blkif->blk_ring_area);
+ lock_vm_area(blkif->blk_ring_area);
+ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
+ unlock_vm_area(blkif->blk_ring_area);
- if (op.handle < 0) {
- DPRINTK(" Grant table operation failure !\n");
- return op.handle;
- }
+ if (op.handle < 0) {
+ DPRINTK(" Grant table operation failure !\n");
+ return op.handle;
+ }
- blkif->shmem_ref = shared_page;
- blkif->shmem_handle = op.handle;
+ blkif->shmem_ref = shared_page;
+ blkif->shmem_handle = op.handle;
- return 0;
+ return 0;
}
static void unmap_frontend_page(blkif_t *blkif)
{
- struct gnttab_unmap_grant_ref op;
+ struct gnttab_unmap_grant_ref op;
- op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
- op.handle = blkif->shmem_handle;
- op.dev_bus_addr = 0;
+ op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
+ op.handle = blkif->shmem_handle;
+ op.dev_bus_addr = 0;
- lock_vm_area(blkif->blk_ring_area);
- BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
- unlock_vm_area(blkif->blk_ring_area);
+ lock_vm_area(blkif->blk_ring_area);
+ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
+ unlock_vm_area(blkif->blk_ring_area);
}
int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
{
- blkif_sring_t *sring;
- evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
- int err;
+ blkif_sring_t *sring;
+ evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
+ int err;
- BUG_ON(blkif->remote_evtchn);
+ BUG_ON(blkif->remote_evtchn);
- if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
- return -ENOMEM;
+ if ((blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
+ return -ENOMEM;
- err = map_frontend_page(blkif, shared_page);
- if (err) {
- free_vm_area(blkif->blk_ring_area);
- return err;
- }
+ err = map_frontend_page(blkif, shared_page);
+ if (err) {
+ free_vm_area(blkif->blk_ring_area);
+ return err;
+ }
- op.u.bind_interdomain.dom1 = DOMID_SELF;
- op.u.bind_interdomain.dom2 = blkif->domid;
- op.u.bind_interdomain.port1 = 0;
- op.u.bind_interdomain.port2 = evtchn;
- err = HYPERVISOR_event_channel_op(&op);
- if (err) {
- unmap_frontend_page(blkif);
- free_vm_area(blkif->blk_ring_area);
- return err;
- }
+ op.u.bind_interdomain.dom1 = DOMID_SELF;
+ op.u.bind_interdomain.dom2 = blkif->domid;
+ op.u.bind_interdomain.port1 = 0;
+ op.u.bind_interdomain.port2 = evtchn;
+ err = HYPERVISOR_event_channel_op(&op);
+ if (err) {
+ unmap_frontend_page(blkif);
+ free_vm_area(blkif->blk_ring_area);
+ return err;
+ }
- blkif->evtchn = op.u.bind_interdomain.port1;
- blkif->remote_evtchn = evtchn;
+ blkif->evtchn = op.u.bind_interdomain.port1;
+ blkif->remote_evtchn = evtchn;
- sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
- SHARED_RING_INIT(sring);
- BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
+ sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
+ SHARED_RING_INIT(sring);
+ BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
- bind_evtchn_to_irqhandler(blkif->evtchn, blkif_be_int, 0, "blkif-backend",
- blkif);
- blkif->status = CONNECTED;
+ bind_evtchn_to_irqhandler(
+ blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
+ blkif->status = CONNECTED;
- return 0;
+ return 0;
}
static void free_blkif(void *arg)
{
- evtchn_op_t op = { .cmd = EVTCHNOP_close };
- blkif_t *blkif = (blkif_t *)arg;
+ evtchn_op_t op = { .cmd = EVTCHNOP_close };
+ blkif_t *blkif = (blkif_t *)arg;
- op.u.close.port = blkif->evtchn;
- op.u.close.dom = DOMID_SELF;
- HYPERVISOR_event_channel_op(&op);
- op.u.close.port = blkif->remote_evtchn;
- op.u.close.dom = blkif->domid;
- HYPERVISOR_event_channel_op(&op);
+ op.u.close.port = blkif->evtchn;
+ op.u.close.dom = DOMID_SELF;
+ HYPERVISOR_event_channel_op(&op);
+ op.u.close.port = blkif->remote_evtchn;
+ op.u.close.dom = blkif->domid;
+ HYPERVISOR_event_channel_op(&op);
- vbd_free(&blkif->vbd);
+ vbd_free(&blkif->vbd);
- if (blkif->evtchn)
- unbind_evtchn_from_irqhandler(blkif->evtchn, blkif);
+ if (blkif->evtchn)
+ unbind_evtchn_from_irqhandler(blkif->evtchn, blkif);
- if (blkif->blk_ring.sring) {
- unmap_frontend_page(blkif);
- free_vm_area(blkif->blk_ring_area);
- blkif->blk_ring.sring = NULL;
- }
+ if (blkif->blk_ring.sring) {
+ unmap_frontend_page(blkif);
+ free_vm_area(blkif->blk_ring_area);
+ blkif->blk_ring.sring = NULL;
+ }
- kmem_cache_free(blkif_cachep, blkif);
+ kmem_cache_free(blkif_cachep, blkif);
}
void free_blkif_callback(blkif_t *blkif)
{
- INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
- schedule_work(&blkif->free_work);
+ INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
+ schedule_work(&blkif->free_work);
}
void __init blkif_interface_init(void)
{
- blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
- 0, 0, NULL, NULL);
+ blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
+ 0, 0, NULL, NULL);
}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c Thu Sep 22 13:04:14 2005
@@ -11,10 +11,10 @@
static inline dev_t vbd_map_devnum(u32 cookie)
{
- return MKDEV(BLKIF_MAJOR(cookie), BLKIF_MINOR(cookie));
+ return MKDEV(BLKIF_MAJOR(cookie), BLKIF_MINOR(cookie));
}
-#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
- (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
+#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
+ (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
#define bdev_put(_b) blkdev_put(_b)
unsigned long vbd_size(struct vbd *vbd)
@@ -35,63 +35,73 @@
int vbd_create(blkif_t *blkif, blkif_vdev_t handle,
u32 pdevice, int readonly)
{
- struct vbd *vbd;
+ struct vbd *vbd;
- vbd = &blkif->vbd;
- vbd->handle = handle;
- vbd->readonly = readonly;
- vbd->type = 0;
+ vbd = &blkif->vbd;
+ vbd->handle = handle;
+ vbd->readonly = readonly;
+ vbd->type = 0;
- vbd->pdevice = pdevice;
+ vbd->pdevice = pdevice;
- vbd->bdev = open_by_devnum(
- vbd_map_devnum(vbd->pdevice),
- vbd->readonly ? FMODE_READ : FMODE_WRITE);
- if ( IS_ERR(vbd->bdev) )
- {
- DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
- return -ENOENT;
- }
+ vbd->bdev = open_by_devnum(
+ vbd_map_devnum(vbd->pdevice),
+ vbd->readonly ? FMODE_READ : FMODE_WRITE);
+ if (IS_ERR(vbd->bdev)) {
+ DPRINTK("vbd_creat: device %08x doesn't exist.\n",
+ vbd->pdevice);
+ return -ENOENT;
+ }
- if ( (vbd->bdev->bd_disk == NULL) )
- {
- DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
- vbd_free(vbd);
- return -ENOENT;
- }
+ if (vbd->bdev->bd_disk == NULL) {
+ DPRINTK("vbd_creat: device %08x doesn't exist.\n",
+ vbd->pdevice);
+ vbd_free(vbd);
+ return -ENOENT;
+ }
- if ( vbd->bdev->bd_disk->flags & GENHD_FL_CD )
- vbd->type |= VDISK_CDROM;
- if ( vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE )
- vbd->type |= VDISK_REMOVABLE;
+ if (vbd->bdev->bd_disk->flags & GENHD_FL_CD)
+ vbd->type |= VDISK_CDROM;
+ if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
+ vbd->type |= VDISK_REMOVABLE;
- DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
- handle, blkif->domid);
- return 0;
+ DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
+ handle, blkif->domid);
+ return 0;
}
void vbd_free(struct vbd *vbd)
{
- if (vbd->bdev)
- bdev_put(vbd->bdev);
- vbd->bdev = NULL;
+ if (vbd->bdev)
+ bdev_put(vbd->bdev);
+ vbd->bdev = NULL;
}
int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
{
- struct vbd *vbd = &blkif->vbd;
- int rc = -EACCES;
+ struct vbd *vbd = &blkif->vbd;
+ int rc = -EACCES;
- if ((operation == WRITE) && vbd->readonly)
- goto out;
+ if ((operation == WRITE) && vbd->readonly)
+ goto out;
- if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
- goto out;
+ if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
+ goto out;
- req->dev = vbd->pdevice;
- req->bdev = vbd->bdev;
- rc = 0;
+ req->dev = vbd->pdevice;
+ req->bdev = vbd->bdev;
+ rc = 0;
out:
- return rc;
+ return rc;
}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c Thu Sep 22 13:04:14 2005
@@ -124,7 +124,7 @@
return;
-abort:
+ abort:
xenbus_transaction_end(1);
}
@@ -278,3 +278,13 @@
{
xenbus_register_backend(&blkback);
}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/blkfront/block.h
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Thu Sep 22 13:04:14 2005
@@ -146,4 +146,15 @@
int xlvbd_add(blkif_sector_t capacity, int device,
u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
void xlvbd_del(struct blkfront_info *info);
+
#endif /* __XEN_DRIVERS_BLOCK_H__ */
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c Thu Sep 22 13:04:14 2005
@@ -65,7 +65,7 @@
};
static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
- NUM_VBD_MAJORS];
+ NUM_VBD_MAJORS];
#define XLBD_MAJOR_IDE_START 0
#define XLBD_MAJOR_SCSI_START (NUM_IDE_MAJORS)
@@ -309,3 +309,13 @@
bdput(bd);
}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c Thu Sep 22 13:04:14 2005
@@ -4,7 +4,6 @@
* This is a modified version of the block backend driver that remaps requests
* to a user-space memory region. It is intended to be used to write
* application-level servers that provide block interfaces to client VMs.
- *
*/
#include <linux/kernel.h>
@@ -67,20 +66,19 @@
static inline int BLKTAP_MODE_VALID(unsigned long arg)
{
- return (
- ( arg == BLKTAP_MODE_PASSTHROUGH ) ||
- ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
- ( arg == BLKTAP_MODE_INTERPOSE ) );
+ return ((arg == BLKTAP_MODE_PASSTHROUGH) ||
+ (arg == BLKTAP_MODE_INTERCEPT_FE) ||
+ (arg == BLKTAP_MODE_INTERPOSE));
/*
- return (
- ( arg == BLKTAP_MODE_PASSTHROUGH ) ||
- ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
- ( arg == BLKTAP_MODE_INTERCEPT_BE ) ||
- ( arg == BLKTAP_MODE_INTERPOSE ) ||
- ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) ||
- ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) ||
- ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH )
- );
+ return (
+ ( arg == BLKTAP_MODE_PASSTHROUGH ) ||
+ ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
+ ( arg == BLKTAP_MODE_INTERCEPT_BE ) ||
+ ( arg == BLKTAP_MODE_INTERPOSE ) ||
+ ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) ||
+ ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) ||
+ ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH )
+ );
*/
}
@@ -110,14 +108,12 @@
unsigned long rings_vstart; /* start of mmaped vma */
unsigned long user_vstart; /* start of user mappings */
-#define MMAP_PAGES \
- (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
-#define MMAP_VADDR(_start, _req,_seg) \
- (_start + \
- ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \
- ((_seg) * PAGE_SIZE))
-
-
+#define MMAP_PAGES \
+ (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
+#define MMAP_VADDR(_start, _req,_seg) \
+ (_start + \
+ ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \
+ ((_seg) * PAGE_SIZE))
/*
* Each outstanding request that we've passed to the lower device layers has a
@@ -126,12 +122,12 @@
* response queued for it, with the saved 'id' passed back.
*/
typedef struct {
- blkif_t *blkif;
- unsigned long id;
- int nr_pages;
- atomic_t pendcnt;
- unsigned short operation;
- int status;
+ blkif_t *blkif;
+ unsigned long id;
+ int nr_pages;
+ atomic_t pendcnt;
+ unsigned short operation;
+ int status;
} pending_req_t;
/*
@@ -156,17 +152,17 @@
static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
{
- return ( (fe_dom << 16) | MASK_PEND_IDX(idx) );
+ return ((fe_dom << 16) | MASK_PEND_IDX(idx));
}
extern inline PEND_RING_IDX ID_TO_IDX(unsigned long id)
{
- return (PEND_RING_IDX)( id & 0x0000ffff );
+ return (PEND_RING_IDX)(id & 0x0000ffff);
}
extern inline domid_t ID_TO_DOM(unsigned long id)
{
- return (domid_t)(id >> 16);
+ return (domid_t)(id >> 16);
}
@@ -181,8 +177,8 @@
*/
struct grant_handle_pair
{
- u16 kernel;
- u16 user;
+ u16 kernel;
+ u16 user;
};
static struct grant_handle_pair pending_grant_handles[MMAP_PAGES];
#define pending_handle(_idx, _i) \
@@ -199,21 +195,20 @@
*/
static struct page *blktap_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int *type)
-{
- /*
- * if the page has not been mapped in by the driver then generate
- * a SIGBUS to the domain.
- */
-
- force_sig(SIGBUS, current);
-
- return 0;
+ unsigned long address,
+ int *type)
+{
+ /*
+ * if the page has not been mapped in by the driver then generate
+ * a SIGBUS to the domain.
+ */
+ force_sig(SIGBUS, current);
+
+ return 0;
}
struct vm_operations_struct blktap_vm_ops = {
- nopage: blktap_nopage,
+ nopage: blktap_nopage,
};
/******************************************************************
@@ -222,44 +217,45 @@
static int blktap_open(struct inode *inode, struct file *filp)
{
- blkif_sring_t *sring;
+ blkif_sring_t *sring;
+
+ if (test_and_set_bit(0, &blktap_dev_inuse))
+ return -EBUSY;
- if ( test_and_set_bit(0, &blktap_dev_inuse) )
- return -EBUSY;
+ /* Allocate the fe ring. */
+ sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
+ if (sring == NULL)
+ goto fail_nomem;
+
+ SetPageReserved(virt_to_page(sring));
- /* Allocate the fe ring. */
- sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
- if (sring == NULL)
- goto fail_nomem;
-
- SetPageReserved(virt_to_page(sring));
-
- SHARED_RING_INIT(sring);
- FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
-
- return 0;
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
+
+ return 0;
fail_nomem:
- return -ENOMEM;
+ return -ENOMEM;
}
static int blktap_release(struct inode *inode, struct file *filp)
{
- blktap_dev_inuse = 0;
- blktap_ring_ok = 0;
-
- /* Free the ring page. */
- ClearPageReserved(virt_to_page(blktap_ufe_ring.sring));
- free_page((unsigned long) blktap_ufe_ring.sring);
-
- /* Clear any active mappings and free foreign map table */
- if (blktap_vma != NULL) {
- zap_page_range(blktap_vma, blktap_vma->vm_start,
- blktap_vma->vm_end - blktap_vma->vm_start, NULL);
- blktap_vma = NULL;
- }
-
- return 0;
+ blktap_dev_inuse = 0;
+ blktap_ring_ok = 0;
+
+ /* Free the ring page. */
+ ClearPageReserved(virt_to_page(blktap_ufe_ring.sring));
+ free_page((unsigned long) blktap_ufe_ring.sring);
+
+ /* Clear any active mappings and free foreign map table */
+ if (blktap_vma != NULL) {
+ zap_page_range(
+ blktap_vma, blktap_vma->vm_start,
+ blktap_vma->vm_end - blktap_vma->vm_start, NULL);
+ blktap_vma = NULL;
+ }
+
+ return 0;
}
@@ -283,128 +279,124 @@
*/
static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
{
- int size;
- struct page **map;
- int i;
-
- DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
- vma->vm_start, vma->vm_end);
-
- vma->vm_flags |= VM_RESERVED;
- vma->vm_ops = &blktap_vm_ops;
-
- size = vma->vm_end - vma->vm_start;
- if ( size != ( (MMAP_PAGES + RING_PAGES) << PAGE_SHIFT ) ) {
- printk(KERN_INFO
- "blktap: you _must_ map exactly %d pages!\n",
- MMAP_PAGES + RING_PAGES);
- return -EAGAIN;
- }
-
- size >>= PAGE_SHIFT;
- DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
+ int size;
+ struct page **map;
+ int i;
+
+ DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
+ vma->vm_start, vma->vm_end);
+
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_ops = &blktap_vm_ops;
+
+ size = vma->vm_end - vma->vm_start;
+ if (size != ((MMAP_PAGES + RING_PAGES) << PAGE_SHIFT)) {
+ printk(KERN_INFO
+ "blktap: you _must_ map exactly %d pages!\n",
+ MMAP_PAGES + RING_PAGES);
+ return -EAGAIN;
+ }
+
+ size >>= PAGE_SHIFT;
+ DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
- rings_vstart = vma->vm_start;
- user_vstart = rings_vstart + (RING_PAGES << PAGE_SHIFT);
+ rings_vstart = vma->vm_start;
+ user_vstart = rings_vstart + (RING_PAGES << PAGE_SHIFT);
- /* Map the ring pages to the start of the region and reserve it. */
-
- /* not sure if I really need to do this... */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (remap_pfn_range(vma, vma->vm_start,
- __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT,
- PAGE_SIZE, vma->vm_page_prot))
- {
- WPRINTK("Mapping user ring failed!\n");
- goto fail;
- }
-
- /* Mark this VM as containing foreign pages, and set up mappings. */
- map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
- * sizeof(struct page_struct*),
- GFP_KERNEL);
- if (map == NULL)
- {
- WPRINTK("Couldn't alloc VM_FOREIGH map.\n");
- goto fail;
- }
-
- for (i=0; i<((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
- map[i] = NULL;
+ /* Map the ring pages to the start of the region and reserve it. */
+
+ /* not sure if I really need to do this... */
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot)) {
+ WPRINTK("Mapping user ring failed!\n");
+ goto fail;
+ }
+
+ /* Mark this VM as containing foreign pages, and set up mappings. */
+ map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
+ * sizeof(struct page_struct*),
+ GFP_KERNEL);
+ if (map == NULL) {
+ WPRINTK("Couldn't alloc VM_FOREIGH map.\n");
+ goto fail;
+ }
+
+ for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
+ map[i] = NULL;
- vma->vm_private_data = map;
- vma->vm_flags |= VM_FOREIGN;
-
- blktap_vma = vma;
- blktap_ring_ok = 1;
-
- return 0;
+ vma->vm_private_data = map;
+ vma->vm_flags |= VM_FOREIGN;
+
+ blktap_vma = vma;
+ blktap_ring_ok = 1;
+
+ return 0;
fail:
- /* Clear any active mappings. */
- zap_page_range(vma, vma->vm_start,
- vma->vm_end - vma->vm_start, NULL);
-
- return -ENOMEM;
+ /* Clear any active mappings. */
+ zap_page_range(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start, NULL);
+
+ return -ENOMEM;
}
static int blktap_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- switch(cmd) {
- case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */
- return blktap_read_ufe_ring();
-
- case BLKTAP_IOCTL_SETMODE:
- if (BLKTAP_MODE_VALID(arg)) {
- blktap_mode = arg;
- /* XXX: may need to flush rings here. */
- printk(KERN_INFO "blktap: set mode to %lx\n", arg);
- return 0;
- }
- case BLKTAP_IOCTL_PRINT_IDXS:
+ switch(cmd) {
+ case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */
+ return blktap_read_ufe_ring();
+
+ case BLKTAP_IOCTL_SETMODE:
+ if (BLKTAP_MODE_VALID(arg)) {
+ blktap_mode = arg;
+ /* XXX: may need to flush rings here. */
+ printk(KERN_INFO "blktap: set mode to %lx\n", arg);
+ return 0;
+ }
+ case BLKTAP_IOCTL_PRINT_IDXS:
{
- //print_fe_ring_idxs();
- WPRINTK("User Rings: \n-----------\n");
- WPRINTK("UF: rsp_cons: %2d, req_prod_prv: %2d "
- "| req_prod: %2d, rsp_prod: %2d\n",
- blktap_ufe_ring.rsp_cons,
- blktap_ufe_ring.req_prod_pvt,
- blktap_ufe_ring.sring->req_prod,
- blktap_ufe_ring.sring->rsp_prod);
+ //print_fe_ring_idxs();
+ WPRINTK("User Rings: \n-----------\n");
+ WPRINTK("UF: rsp_cons: %2d, req_prod_prv: %2d "
+ "| req_prod: %2d, rsp_prod: %2d\n",
+ blktap_ufe_ring.rsp_cons,
+ blktap_ufe_ring.req_prod_pvt,
+ blktap_ufe_ring.sring->req_prod,
+ blktap_ufe_ring.sring->rsp_prod);
}
- }
- return -ENOIOCTLCMD;
+ }
+ return -ENOIOCTLCMD;
}
static unsigned int blktap_poll(struct file *file, poll_table *wait)
{
- poll_wait(file, &blktap_wait, wait);
- if ( RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring) )
- {
- flush_tlb_all();
-
- RING_PUSH_REQUESTS(&blktap_ufe_ring);
- return POLLIN | POLLRDNORM;
- }
-
- return 0;
+ poll_wait(file, &blktap_wait, wait);
+ if (RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring)) {
+ flush_tlb_all();
+ RING_PUSH_REQUESTS(&blktap_ufe_ring);
+ return POLLIN | POLLRDNORM;
+ }
+
+ return 0;
}
void blktap_kick_user(void)
{
- /* blktap_ring->req_prod = blktap_req_prod; */
- wake_up_interruptible(&blktap_wait);
+ /* blktap_ring->req_prod = blktap_req_prod; */
+ wake_up_interruptible(&blktap_wait);
}
static struct file_operations blktap_fops = {
- owner: THIS_MODULE,
- poll: blktap_poll,
- ioctl: blktap_ioctl,
- open: blktap_open,
- release: blktap_release,
- mmap: blktap_mmap,
+ owner: THIS_MODULE,
+ poll: blktap_poll,
+ ioctl: blktap_ioctl,
+ open: blktap_open,
+ release: blktap_release,
+ mmap: blktap_mmap,
};
@@ -417,44 +409,44 @@
static void fast_flush_area(int idx, int nr_pages)
{
- struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
- unsigned int i, op = 0;
- struct grant_handle_pair *handle;
- unsigned long ptep;
-
- for (i=0; i<nr_pages; i++)
- {
- handle = &pending_handle(idx, i);
- if (!BLKTAP_INVALID_HANDLE(handle))
- {
-
- unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
- unmap[op].dev_bus_addr = 0;
- unmap[op].handle = handle->kernel;
- op++;
-
- if (create_lookup_pte_addr(blktap_vma->vm_mm,
- MMAP_VADDR(user_vstart, idx, i),
- &ptep) !=0) {
- DPRINTK("Couldn't get a pte addr!\n");
- return;
- }
- unmap[op].host_addr = ptep;
- unmap[op].dev_bus_addr = 0;
- unmap[op].handle = handle->user;
- op++;
+ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
+ unsigned int i, op = 0;
+ struct grant_handle_pair *handle;
+ unsigned long ptep;
+
+ for (i = 0; i < nr_pages; i++)
+ {
+ handle = &pending_handle(idx, i);
+ if (BLKTAP_INVALID_HANDLE(handle))
+ continue;
+
+ unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
+ unmap[op].dev_bus_addr = 0;
+ unmap[op].handle = handle->kernel;
+ op++;
+
+ if (create_lookup_pte_addr(
+ blktap_vma->vm_mm,
+ MMAP_VADDR(user_vstart, idx, i),
+ &ptep) !=0) {
+ DPRINTK("Couldn't get a pte addr!\n");
+ return;
+ }
+ unmap[op].host_addr = ptep;
+ unmap[op].dev_bus_addr = 0;
+ unmap[op].handle = handle->user;
+ op++;
- BLKTAP_INVALIDATE_HANDLE(handle);
- }
- }
- if ( unlikely(HYPERVISOR_grant_table_op(
- GNTTABOP_unmap_grant_ref, unmap, op)))
- BUG();
-
- if (blktap_vma != NULL)
- zap_page_range(blktap_vma,
- MMAP_VADDR(user_vstart, idx, 0),
- nr_pages << PAGE_SHIFT, NULL);
+ BLKTAP_INVALIDATE_HANDLE(handle);
+ }
+
+ BUG_ON(HYPERVISOR_grant_table_op(
+ GNTTABOP_unmap_grant_ref, unmap, op));
+
+ if (blktap_vma != NULL)
+ zap_page_range(blktap_vma,
+ MMAP_VADDR(user_vstart, idx, 0),
+ nr_pages << PAGE_SHIFT, NULL);
}
/******************************************************************
@@ -466,34 +458,38 @@
static int __on_blkdev_list(blkif_t *blkif)
{
- return blkif->blkdev_list.next != NULL;
+ return blkif->blkdev_list.next != NULL;
}
static void remove_from_blkdev_list(blkif_t *blkif)
{
- unsigned long flags;
- if ( !__on_blkdev_list(blkif) ) return;
- spin_lock_irqsave(&blkio_schedule_list_lock, flags);
- if ( __on_blkdev_list(blkif) )
- {
- list_del(&blkif->blkdev_list);
- blkif->blkdev_list.next = NULL;
- blkif_put(blkif);
- }
- spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+ unsigned long flags;
+
+ if (!__on_blkdev_list(blkif))
+ return;
+
+ spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+ if (__on_blkdev_list(blkif)) {
+ list_del(&blkif->blkdev_list);
+ blkif->blkdev_list.next = NULL;
+ blkif_put(blkif);
+ }
+ spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
}
static void add_to_blkdev_list_tail(blkif_t *blkif)
{
- unsigned long flags;
- if ( __on_blkdev_list(blkif) ) return;
- spin_lock_irqsave(&blkio_schedule_list_lock, flags);
- if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
- {
- list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
- blkif_get(blkif);
- }
- spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+ unsigned long flags;
+
+ if (__on_blkdev_list(blkif))
+ return;
+
+ spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+ if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) {
+ list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
+ blkif_get(blkif);
+ }
+ spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
}
@@ -505,51 +501,50 @@
static int blkio_schedule(void *arg)
{
- DECLARE_WAITQUEUE(wq, current);
-
- blkif_t *blkif;
- struct list_head *ent;
-
- daemonize("xenblkd");
-
- for ( ; ; )
- {
- /* Wait for work to do. */
- add_wait_queue(&blkio_schedule_wait, &wq);
- set_current_state(TASK_INTERRUPTIBLE);
- if ( (NR_PENDING_REQS == MAX_PENDING_REQS) ||
- list_empty(&blkio_schedule_list) )
- schedule();
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&blkio_schedule_wait, &wq);
-
- /* Queue up a batch of requests. */
- while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
- !list_empty(&blkio_schedule_list) )
- {
- ent = blkio_schedule_list.next;
- blkif = list_entry(ent, blkif_t, blkdev_list);
- blkif_get(blkif);
- remove_from_blkdev_list(blkif);
- if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
- add_to_blkdev_list_tail(blkif);
- blkif_put(blkif);
- }
- }
+ DECLARE_WAITQUEUE(wq, current);
+
+ blkif_t *blkif;
+ struct list_head *ent;
+
+ daemonize("xenblkd");
+
+ for (;;) {
+ /* Wait for work to do. */
+ add_wait_queue(&blkio_schedule_wait, &wq);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if ((NR_PENDING_REQS == MAX_PENDING_REQS) ||
+ list_empty(&blkio_schedule_list))
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&blkio_schedule_wait, &wq);
+
+ /* Queue up a batch of requests. */
+ while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
+ !list_empty(&blkio_schedule_list)) {
+ ent = blkio_schedule_list.next;
+ blkif = list_entry(ent, blkif_t, blkdev_list);
+ blkif_get(blkif);
+ remove_from_blkdev_list(blkif);
+ if (do_block_io_op(blkif, BATCH_PER_DOMAIN))
+ add_to_blkdev_list_tail(blkif);
+ blkif_put(blkif);
+ }
+ }
}
static void maybe_trigger_blkio_schedule(void)
{
- /*
- * Needed so that two processes, who together make the following predicate
- * true, don't both read stale values and evaluate the predicate
- * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
- */
- smp_mb();
-
- if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
- !list_empty(&blkio_schedule_list) )
- wake_up(&blkio_schedule_wait);
+ /*
+ * Needed so that two processes, which together make the following
+ * predicate true, don't both read stale values and evaluate the
+ * predicate incorrectly. Incredibly unlikely to stall the scheduler
+ * on x86, but...
+ */
+ smp_mb();
+
+ if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
+ !list_empty(&blkio_schedule_list))
+ wake_up(&blkio_schedule_wait);
}
@@ -561,54 +556,53 @@
static int blktap_read_ufe_ring(void)
{
- /* This is called to read responses from the UFE ring. */
-
- RING_IDX i, j, rp;
- blkif_response_t *resp;
- blkif_t *blkif;
- int pending_idx;
- pending_req_t *pending_req;
- unsigned long flags;
-
- /* if we are forwarding from UFERring to FERing */
- if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) {
-
- /* for each outstanding message on the UFEring */
- rp = blktap_ufe_ring.sring->rsp_prod;
- rmb();
+ /* This is called to read responses from the UFE ring. */
+
+ RING_IDX i, j, rp;
+ blkif_response_t *resp;
+ blkif_t *blkif;
+ int pending_idx;
+ pending_req_t *pending_req;
+ unsigned long flags;
+
+ /* if we are forwarding from UFERring to FERing */
+ if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) {
+
+ /* for each outstanding message on the UFEring */
+ rp = blktap_ufe_ring.sring->rsp_prod;
+ rmb();
- for ( i = blktap_ufe_ring.rsp_cons; i != rp; i++ )
- {
- resp = RING_GET_RESPONSE(&blktap_ufe_ring, i);
- pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id));
- pending_req = &pending_reqs[pending_idx];
+ for (i = blktap_ufe_ring.rsp_cons; i != rp; i++) {
+ resp = RING_GET_RESPONSE(&blktap_ufe_ring, i);
+ pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id));
+ pending_req = &pending_reqs[pending_idx];
- blkif = pending_req->blkif;
- for (j = 0; j < pending_req->nr_pages; j++) {
- unsigned long vaddr;
- struct page **map = blktap_vma->vm_private_data;
- int offset;
-
- vaddr = MMAP_VADDR(user_vstart, pending_idx, j);
- offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
-
- //ClearPageReserved(virt_to_page(vaddr));
- ClearPageReserved((struct page *)map[offset]);
- map[offset] = NULL;
- }
-
- fast_flush_area(pending_idx, pending_req->nr_pages);
- make_response(blkif, pending_req->id, resp->operation,
- resp->status);
- blkif_put(pending_req->blkif);
- spin_lock_irqsave(&pend_prod_lock, flags);
- pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
- spin_unlock_irqrestore(&pend_prod_lock, flags);
- }
- blktap_ufe_ring.rsp_cons = i;
- maybe_trigger_blkio_schedule();
- }
- return 0;
+ blkif = pending_req->blkif;
+ for (j = 0; j < pending_req->nr_pages; j++) {
+ unsigned long vaddr;
+ struct page **map = blktap_vma->vm_private_data;
+ int offset;
+
+ vaddr = MMAP_VADDR(user_vstart, pending_idx, j);
+ offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
+
+ //ClearPageReserved(virt_to_page(vaddr));
+ ClearPageReserved((struct page *)map[offset]);
+ map[offset] = NULL;
+ }
+
+ fast_flush_area(pending_idx, pending_req->nr_pages);
+ make_response(blkif, pending_req->id, resp->operation,
+ resp->status);
+ blkif_put(pending_req->blkif);
+ spin_lock_irqsave(&pend_prod_lock, flags);
+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+ spin_unlock_irqrestore(&pend_prod_lock, flags);
+ }
+ blktap_ufe_ring.rsp_cons = i;
+ maybe_trigger_blkio_schedule();
+ }
+ return 0;
}
@@ -618,10 +612,10 @@
irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
- blkif_t *blkif = dev_id;
- add_to_blkdev_list_tail(blkif);
- maybe_trigger_blkio_schedule();
- return IRQ_HANDLED;
+ blkif_t *blkif = dev_id;
+ add_to_blkdev_list_tail(blkif);
+ maybe_trigger_blkio_schedule();
+ return IRQ_HANDLED;
}
@@ -632,199 +626,194 @@
static int do_block_io_op(blkif_t *blkif, int max_to_do)
{
- blkif_back_ring_t *blk_ring = &blkif->blk_ring;
- blkif_request_t *req;
- RING_IDX i, rp;
- int more_to_do = 0;
+ blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+ blkif_request_t *req;
+ RING_IDX i, rp;
+ int more_to_do = 0;
- rp = blk_ring->sring->req_prod;
- rmb(); /* Ensure we see queued requests up to 'rp'. */
-
- for ( i = blk_ring->req_cons;
- (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
- i++ )
- {
- if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
- {
- more_to_do = 1;
- break;
- }
+ rp = blk_ring->sring->req_prod;
+ rmb(); /* Ensure we see queued requests up to 'rp'. */
+
+ for (i = blk_ring->req_cons;
+ (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
+ i++) {
+ if ((max_to_do-- == 0) ||
+ (NR_PENDING_REQS == MAX_PENDING_REQS)) {
+ more_to_do = 1;
+ break;
+ }
- req = RING_GET_REQUEST(blk_ring, i);
- switch ( req->operation )
- {
- case BLKIF_OP_READ:
- case BLKIF_OP_WRITE:
- dispatch_rw_block_io(blkif, req);
- break;
-
- default:
- DPRINTK("error: unknown block io operation [%d]\n",
- req->operation);
- make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
- break;
- }
- }
-
- blk_ring->req_cons = i;
- blktap_kick_user();
-
- return more_to_do;
+ req = RING_GET_REQUEST(blk_ring, i);
+ switch (req->operation) {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ dispatch_rw_block_io(blkif, req);
+ break;
+
+ default:
+ DPRINTK("error: unknown block io operation [%d]\n",
+ req->operation);
+ make_response(blkif, req->id, req->operation,
+ BLKIF_RSP_ERROR);
+ break;
+ }
+ }
+
+ blk_ring->req_cons = i;
+ blktap_kick_user();
+
+ return more_to_do;
}
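
do_block_io_op() is the standard split-ring consumer: snapshot the producer
index, issue a read barrier, then walk the private req_cons up to the
snapshot. A standalone sketch of the same loop, with illustrative stand-ins
for the shared-ring macros:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define RING_SIZE 32    /* power of two, like the real shared rings */

struct req { unsigned op; uint64_t id; };
struct ring {
        volatile uint32_t req_prod;     /* advanced by the frontend */
        uint32_t req_cons;              /* private to the backend */
        struct req slot[RING_SIZE];
};

static size_t consume(struct ring *r, size_t budget)
{
        uint32_t rp = r->req_prod;
        __atomic_thread_fence(__ATOMIC_ACQUIRE); /* plays the role of rmb() */

        uint32_t i;
        size_t done = 0;
        for (i = r->req_cons; i != rp && done < budget; i++, done++) {
                struct req *req = &r->slot[i & (RING_SIZE - 1)];
                (void)req;      /* dispatch req->op here */
        }
        r->req_cons = i;        /* consumed up to here; the rest waits */
        return done;
}

int main(void)
{
        struct ring r = { .req_prod = 3 };
        printf("%zu\n", consume(&r, 16));  /* prints: 3 */
        return 0;
}
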
static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
{
- blkif_request_t *target;
- int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
- pending_req_t *pending_req;
- struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
- int op, ret;
- unsigned int nseg;
-
- /* Check that number of segments is sane. */
- nseg = req->nr_segments;
- if ( unlikely(nseg == 0) ||
- unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
- {
- DPRINTK("Bad number of segments in request (%d)\n", nseg);
- goto bad_descriptor;
- }
-
- /* Make sure userspace is ready. */
- if (!blktap_ring_ok) {
- DPRINTK("blktap: ring not ready for requests!\n");
- goto bad_descriptor;
- }
+ blkif_request_t *target;
+ int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+ pending_req_t *pending_req;
+ struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
+ int op, ret;
+ unsigned int nseg;
+
+ /* Check that number of segments is sane. */
+ nseg = req->nr_segments;
+ if (unlikely(nseg == 0) ||
+ unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+ DPRINTK("Bad number of segments in request (%d)\n", nseg);
+ goto bad_descriptor;
+ }
+
+ /* Make sure userspace is ready. */
+ if (!blktap_ring_ok) {
+ DPRINTK("blktap: ring not ready for requests!\n");
+ goto bad_descriptor;
+ }
- if ( RING_FULL(&blktap_ufe_ring) ) {
- WPRINTK("blktap: fe_ring is full, can't add (very broken!).\n");
- goto bad_descriptor;
- }
-
- flush_cache_all(); /* a noop on intel... */
-
- /* Map the foreign pages directly in to the application */
- op = 0;
- for (i=0; i<req->nr_segments; i++) {
-
- unsigned long uvaddr;
- unsigned long kvaddr;
- unsigned long ptep;
-
- uvaddr = MMAP_VADDR(user_vstart, pending_idx, i);
- kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
-
- /* Map the remote page to kernel. */
- map[op].host_addr = kvaddr;
- map[op].dom = blkif->domid;
- map[op].ref = blkif_gref_from_fas(req->frame_and_sects[i]);
- map[op].flags = GNTMAP_host_map;
- /* This needs a bit more thought in terms of interposition:
- * If we want to be able to modify pages during write using
- * grant table mappings, the guest will either need to allow
- * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */
- if (req->operation == BLKIF_OP_WRITE)
- map[op].flags |= GNTMAP_readonly;
- op++;
-
- /* Now map it to user. */
- ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
- if (ret)
- {
- DPRINTK("Couldn't get a pte addr!\n");
- fast_flush_area(pending_idx, req->nr_segments);
- goto bad_descriptor;
- }
-
- map[op].host_addr = ptep;
- map[op].dom = blkif->domid;
- map[op].ref = blkif_gref_from_fas(req->frame_and_sects[i]);
- map[op].flags = GNTMAP_host_map | GNTMAP_application_map
- | GNTMAP_contains_pte;
- /* Above interposition comment applies here as well. */
- if (req->operation == BLKIF_OP_WRITE)
- map[op].flags |= GNTMAP_readonly;
- op++;
- }
-
- if ( unlikely(HYPERVISOR_grant_table_op(
- GNTTABOP_map_grant_ref, map, op)))
- BUG();
-
- op = 0;
- for (i=0; i<(req->nr_segments*2); i+=2) {
- unsigned long uvaddr;
- unsigned long kvaddr;
- unsigned long offset;
- int cancel = 0;
-
- uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2);
- kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2);
-
- if ( unlikely(map[i].handle < 0) )
- {
- DPRINTK("Error on kernel grant mapping (%d)\n", map[i].handle);
- ret = map[i].handle;
- cancel = 1;
- }
-
- if ( unlikely(map[i+1].handle < 0) )
- {
- DPRINTK("Error on user grant mapping (%d)\n", map[i+1].handle);
- ret = map[i+1].handle;
- cancel = 1;
- }
-
- if (cancel)
- {
- fast_flush_area(pending_idx, req->nr_segments);
- goto bad_descriptor;
- }
-
- /* Set the necessary mappings in p2m and in the VM_FOREIGN
- * vm_area_struct to allow user vaddr -> struct page lookups
- * to work. This is needed for direct IO to foreign pages. */
- phys_to_machine_mapping[__pa(kvaddr) >> PAGE_SHIFT] =
- FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
-
- offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
- ((struct page **)blktap_vma->vm_private_data)[offset] =
- pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
-
- /* Save handles for unmapping later. */
- pending_handle(pending_idx, i/2).kernel = map[i].handle;
- pending_handle(pending_idx, i/2).user = map[i+1].handle;
- }
-
- /* Mark mapped pages as reserved: */
- for ( i = 0; i < req->nr_segments; i++ )
- {
- unsigned long kvaddr;
-
- kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
- SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
- }
-
- pending_req = &pending_reqs[pending_idx];
- pending_req->blkif = blkif;
- pending_req->id = req->id;
- pending_req->operation = req->operation;
- pending_req->status = BLKIF_RSP_OKAY;
- pending_req->nr_pages = nseg;
- req->id = MAKE_ID(blkif->domid, pending_idx);
- //atomic_set(&pending_req->pendcnt, nbio);
- pending_cons++;
- blkif_get(blkif);
-
- /* Finally, write the request message to the user ring. */
- target = RING_GET_REQUEST(&blktap_ufe_ring, blktap_ufe_ring.req_prod_pvt);
- memcpy(target, req, sizeof(*req));
- blktap_ufe_ring.req_prod_pvt++;
- return;
+ if (RING_FULL(&blktap_ufe_ring)) {
+ WPRINTK("blktap: fe_ring is full, can't add "
+ "(very broken!).\n");
+ goto bad_descriptor;
+ }
+
+ flush_cache_all(); /* a noop on intel... */
+
+ /* Map the foreign pages directly in to the application */
+ op = 0;
+ for (i = 0; i < req->nr_segments; i++) {
+
+ unsigned long uvaddr;
+ unsigned long kvaddr;
+ unsigned long ptep;
+
+ uvaddr = MMAP_VADDR(user_vstart, pending_idx, i);
+ kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
+
+ /* Map the remote page to kernel. */
+ map[op].host_addr = kvaddr;
+ map[op].dom = blkif->domid;
+ map[op].ref = blkif_gref_from_fas(req->frame_and_sects[i]);
+ map[op].flags = GNTMAP_host_map;
+ /* This needs a bit more thought in terms of interposition:
+ * If we want to be able to modify pages during write using
+ * grant table mappings, the guest will either need to allow
+ * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */
+ if (req->operation == BLKIF_OP_WRITE)
+ map[op].flags |= GNTMAP_readonly;
+ op++;
+
+ /* Now map it to user. */
+ ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
+ if (ret) {
+ DPRINTK("Couldn't get a pte addr!\n");
+ fast_flush_area(pending_idx, req->nr_segments);
+ goto bad_descriptor;
+ }
+
+ map[op].host_addr = ptep;
+ map[op].dom = blkif->domid;
+ map[op].ref = blkif_gref_from_fas(req->frame_and_sects[i]);
+ map[op].flags = GNTMAP_host_map | GNTMAP_application_map
+ | GNTMAP_contains_pte;
+ /* Above interposition comment applies here as well. */
+ if (req->operation == BLKIF_OP_WRITE)
+ map[op].flags |= GNTMAP_readonly;
+ op++;
+ }
+
+ BUG_ON(HYPERVISOR_grant_table_op(
+ GNTTABOP_map_grant_ref, map, op));
+
+ op = 0;
+ for (i = 0; i < (req->nr_segments*2); i += 2) {
+ unsigned long uvaddr;
+ unsigned long kvaddr;
+ unsigned long offset;
+ int cancel = 0;
+
+ uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2);
+ kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2);
+
+ if (unlikely(map[i].handle < 0)) {
+ DPRINTK("Error on kernel grant mapping (%d)\n",
+ map[i].handle);
+ ret = map[i].handle;
+ cancel = 1;
+ }
+
+ if (unlikely(map[i+1].handle < 0)) {
+ DPRINTK("Error on user grant mapping (%d)\n",
+ map[i+1].handle);
+ ret = map[i+1].handle;
+ cancel = 1;
+ }
+
+ if (cancel) {
+ fast_flush_area(pending_idx, req->nr_segments);
+ goto bad_descriptor;
+ }
+
+ /* Set the necessary mappings in p2m and in the VM_FOREIGN
+ * vm_area_struct to allow user vaddr -> struct page lookups
+ * to work. This is needed for direct IO to foreign pages. */
+ phys_to_machine_mapping[__pa(kvaddr) >> PAGE_SHIFT] =
+ FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
+
+ offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
+ ((struct page **)blktap_vma->vm_private_data)[offset] =
+ pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+
+ /* Save handles for unmapping later. */
+ pending_handle(pending_idx, i/2).kernel = map[i].handle;
+ pending_handle(pending_idx, i/2).user = map[i+1].handle;
+ }
+
+ /* Mark mapped pages as reserved: */
+ for (i = 0; i < req->nr_segments; i++) {
+ unsigned long kvaddr;
+ kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
+ SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
+ }
+
+ pending_req = &pending_reqs[pending_idx];
+ pending_req->blkif = blkif;
+ pending_req->id = req->id;
+ pending_req->operation = req->operation;
+ pending_req->status = BLKIF_RSP_OKAY;
+ pending_req->nr_pages = nseg;
+ req->id = MAKE_ID(blkif->domid, pending_idx);
+ //atomic_set(&pending_req->pendcnt, nbio);
+ pending_cons++;
+ blkif_get(blkif);
+
+ /* Finally, write the request message to the user ring. */
+ target = RING_GET_REQUEST(&blktap_ufe_ring,
+ blktap_ufe_ring.req_prod_pvt);
+ memcpy(target, req, sizeof(*req));
+ blktap_ufe_ring.req_prod_pvt++;
+ return;
bad_descriptor:
- make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
}
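
dispatch_rw_block_io() overwrites req->id with MAKE_ID(blkif->domid,
pending_idx) before forwarding the request to userspace, and
blktap_read_ufe_ring() recovers the index with ID_TO_IDX()/MASK_PEND_IDX()
when the response comes back. A standalone sketch of that round-trip; the
bit layout below is an assumption for illustration, not the real macros:

#include <assert.h>
#include <stdint.h>

static inline uint64_t make_id(uint16_t domid, uint16_t idx)
{
        return ((uint64_t)domid << 16) | idx;
}

static inline uint16_t id_to_idx(uint64_t id)
{
        return (uint16_t)(id & 0xffff);
}

int main(void)
{
        uint64_t id = make_id(3, 42);   /* stash before forwarding */
        assert(id_to_idx(id) == 42);    /* recover on the response path */
        return 0;
}
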
@@ -837,80 +826,89 @@
static void make_response(blkif_t *blkif, unsigned long id,
unsigned short op, int st)
{
- blkif_response_t *resp;
- unsigned long flags;
- blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-
- /* Place on the response ring for the relevant domain. */
- spin_lock_irqsave(&blkif->blk_ring_lock, flags);
- resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
- resp->id = id;
- resp->operation = op;
- resp->status = st;
- wmb(); /* Ensure other side can see the response fields. */
- blk_ring->rsp_prod_pvt++;
- RING_PUSH_RESPONSES(blk_ring);
- spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-
- /* Kick the relevant domain. */
- notify_via_evtchn(blkif->evtchn);
+ blkif_response_t *resp;
+ unsigned long flags;
+ blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+
+ /* Place on the response ring for the relevant domain. */
+ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+ resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
+ resp->id = id;
+ resp->operation = op;
+ resp->status = st;
+ wmb(); /* Ensure other side can see the response fields. */
+ blk_ring->rsp_prod_pvt++;
+ RING_PUSH_RESPONSES(blk_ring);
+ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
+
+ /* Kick the relevant domain. */
+ notify_via_evtchn(blkif->evtchn);
}
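
make_response() is the matching producer side: fill the response slot, then
wmb(), then advance the producer index, so the frontend can never observe a
published index before the fields it guards. The same publish step modelled
in userspace (illustrative names; a release fence stands in for wmb()):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 32

struct resp { uint64_t id; int status; };
struct rsp_ring {
        struct resp slot[RING_SIZE];
        volatile uint32_t rsp_prod;     /* read by the frontend */
};

static void publish(struct rsp_ring *r, uint64_t id, int status)
{
        uint32_t idx = r->rsp_prod;

        r->slot[idx & (RING_SIZE - 1)].id = id;
        r->slot[idx & (RING_SIZE - 1)].status = status;

        /* Release fence plays the role of wmb(): the fields above must
         * be visible before the producer index moves. */
        __atomic_thread_fence(__ATOMIC_RELEASE);
        r->rsp_prod = idx + 1;
}

int main(void)
{
        static struct rsp_ring r;
        publish(&r, 7, 0);
        printf("rsp_prod=%u\n", r.rsp_prod);  /* prints: rsp_prod=1 */
        return 0;
}
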
static struct miscdevice blktap_miscdev = {
- .minor = BLKTAP_MINOR,
- .name = "blktap",
- .fops = &blktap_fops,
- .devfs_name = "misc/blktap",
+ .minor = BLKTAP_MINOR,
+ .name = "blktap",
+ .fops = &blktap_fops,
+ .devfs_name = "misc/blktap",
};
void blkif_deschedule(blkif_t *blkif)
{
- remove_from_blkdev_list(blkif);
+ remove_from_blkdev_list(blkif);
}
static int __init blkif_init(void)
{
- int i, j, err;
- struct page *page;
+ int i, j, err;
+ struct page *page;
/*
- if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
- !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
- return 0;
+ if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
+ !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
+ return 0;
*/
- blkif_interface_init();
-
- page = balloon_alloc_empty_page_range(MMAP_PAGES);
- BUG_ON(page == NULL);
- mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-
- pending_cons = 0;
- pending_prod = MAX_PENDING_REQS;
- memset(pending_reqs, 0, sizeof(pending_reqs));
- for ( i = 0; i < MAX_PENDING_REQS; i++ )
- pending_ring[i] = i;
+ blkif_interface_init();
+
+ page = balloon_alloc_empty_page_range(MMAP_PAGES);
+ BUG_ON(page == NULL);
+ mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
+
+ pending_cons = 0;
+ pending_prod = MAX_PENDING_REQS;
+ memset(pending_reqs, 0, sizeof(pending_reqs));
+ for (i = 0; i < MAX_PENDING_REQS; i++)
+ pending_ring[i] = i;
- spin_lock_init(&blkio_schedule_list_lock);
- INIT_LIST_HEAD(&blkio_schedule_list);
-
- if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
- BUG();
-
- blkif_xenbus_init();
-
- for (i=0; i<MAX_PENDING_REQS ; i++)
- for (j=0; j<BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
- BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j));
-
- err = misc_register(&blktap_miscdev);
- if ( err != 0 )
- {
- printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n", err);
- return err;
- }
-
- init_waitqueue_head(&blktap_wait);
-
- return 0;
+ spin_lock_init(&blkio_schedule_list_lock);
+ INIT_LIST_HEAD(&blkio_schedule_list);
+
+ BUG_ON(kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0);
+
+ blkif_xenbus_init();
+
+ for (i = 0; i < MAX_PENDING_REQS ; i++)
+ for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
+ BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j));
+
+ err = misc_register(&blktap_miscdev);
+ if (err != 0) {
+ printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n",
+ err);
+ return err;
+ }
+
+ init_waitqueue_head(&blktap_wait);
+
+ return 0;
}
__initcall(blkif_init);
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/blktap/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/common.h Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/common.h Thu Sep 22 13:04:14 2005
@@ -33,39 +33,39 @@
#define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
struct vbd {
- blkif_vdev_t handle; /* what the domain refers to this vbd as */
- unsigned char readonly; /* Non-zero -> read-only */
- unsigned char type; /* VDISK_xxx */
- u32 pdevice; /* phys device that this vbd maps to */
- struct block_device *bdev;
+ blkif_vdev_t handle; /* what the domain refers to this vbd as */
+ unsigned char readonly; /* Non-zero -> read-only */
+ unsigned char type; /* VDISK_xxx */
+ u32 pdevice; /* phys device that this vbd maps to */
+ struct block_device *bdev;
};
typedef struct blkif_st {
- /* Unique identifier for this interface. */
- domid_t domid;
- unsigned int handle;
- /* Physical parameters of the comms window. */
- unsigned int evtchn;
- unsigned int remote_evtchn;
- /* Comms information. */
- blkif_back_ring_t blk_ring;
- struct vm_struct *blk_ring_area;
- /* VBDs attached to this interface. */
- struct vbd vbd;
- /* Private fields. */
- enum { DISCONNECTED, CONNECTED } status;
+ /* Unique identifier for this interface. */
+ domid_t domid;
+ unsigned int handle;
+ /* Physical parameters of the comms window. */
+ unsigned int evtchn;
+ unsigned int remote_evtchn;
+ /* Comms information. */
+ blkif_back_ring_t blk_ring;
+ struct vm_struct *blk_ring_area;
+ /* VBDs attached to this interface. */
+ struct vbd vbd;
+ /* Private fields. */
+ enum { DISCONNECTED, CONNECTED } status;
#ifdef CONFIG_XEN_BLKDEV_TAP_BE
- /* Is this a blktap frontend */
- unsigned int is_blktap;
+ /* Is this a blktap frontend */
+ unsigned int is_blktap;
#endif
- struct list_head blkdev_list;
- spinlock_t blk_ring_lock;
- atomic_t refcnt;
+ struct list_head blkdev_list;
+ spinlock_t blk_ring_lock;
+ atomic_t refcnt;
- struct work_struct free_work;
+ struct work_struct free_work;
- u16 shmem_handle;
- grant_ref_t shmem_ref;
+ u16 shmem_handle;
+ grant_ref_t shmem_ref;
} blkif_t;
blkif_t *alloc_blkif(domid_t domid);
@@ -89,10 +89,10 @@
unsigned long vbd_secsize(struct vbd *vbd);
struct phys_req {
- unsigned short dev;
- unsigned short nr_sects;
- struct block_device *bdev;
- blkif_sector_t sector_number;
+ unsigned short dev;
+ unsigned short nr_sects;
+ struct block_device *bdev;
+ blkif_sector_t sector_number;
};
int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation);
@@ -106,3 +106,13 @@
irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
#endif /* __BLKIF__BACKEND__COMMON_H__ */
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c Thu Sep 22 13:04:14 2005
@@ -222,3 +222,13 @@
{
xenbus_register_backend(&blkback);
}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/console/console.c
--- a/linux-2.6-xen-sparse/drivers/xen/console/console.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/console/console.c Thu Sep 22 13:04:14 2005
@@ -75,31 +75,33 @@
static int __init xencons_setup(char *str)
{
- char *q;
- int n;
-
- if ( !strncmp(str, "ttyS", 4) )
- xc_mode = XC_SERIAL;
- else if ( !strncmp(str, "tty", 3) )
- xc_mode = XC_TTY;
- else if ( !strncmp(str, "off", 3) )
- xc_mode = XC_OFF;
-
- switch ( xc_mode )
- {
- case XC_SERIAL:
- n = simple_strtol( str+4, &q, 10 );
- if ( q > (str + 4) ) xc_num = n;
- break;
- case XC_TTY:
- n = simple_strtol( str+3, &q, 10 );
- if ( q > (str + 3) ) xc_num = n;
- break;
- default:
- break;
- }
-
- return 1;
+ char *q;
+ int n;
+
+ if (!strncmp(str, "ttyS", 4))
+ xc_mode = XC_SERIAL;
+ else if (!strncmp(str, "tty", 3))
+ xc_mode = XC_TTY;
+ else if (!strncmp(str, "off", 3))
+ xc_mode = XC_OFF;
+
+ switch (xc_mode) {
+ case XC_SERIAL:
+ n = simple_strtol(str+4, &q, 10);
+ if (q > (str + 4))
+ xc_num = n;
+ break;
+ case XC_TTY:
+ n = simple_strtol(str+3, &q, 10);
+ if (q > (str + 3))
+ xc_num = n;
+ break;
+ default:
+ break;
+ }
+
+ return 1;
}
__setup("xencons=", xencons_setup);
@@ -111,11 +113,11 @@
static int __init xencons_bufsz_setup(char *str)
{
- unsigned int goal;
- goal = simple_strtoul(str, NULL, 0);
- while ( wbuf_size < goal )
- wbuf_size <<= 1;
- return 1;
+ unsigned int goal;
+ goal = simple_strtoul(str, NULL, 0);
+ while (wbuf_size < goal)
+ wbuf_size <<= 1;
+ return 1;
}
__setup("xencons_bufsz=", xencons_bufsz_setup);
@@ -135,57 +137,55 @@
/******************** Kernel console driver ********************************/
static void kcons_write(
- struct console *c, const char *s, unsigned int count)
-{
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&xencons_lock, flags);
+ struct console *c, const char *s, unsigned int count)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xencons_lock, flags);
- for ( i = 0; i < count; i++ )
- {
- if ( (wp - wc) >= (wbuf_size - 1) )
- break;
- if ( (wbuf[WBUF_MASK(wp++)] = s[i]) == '\n' )
- wbuf[WBUF_MASK(wp++)] = '\r';
- }
-
- __xencons_tx_flush();
-
- spin_unlock_irqrestore(&xencons_lock, flags);
+ for (i = 0; i < count; i++) {
+ if ((wp - wc) >= (wbuf_size - 1))
+ break;
+ if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
+ wbuf[WBUF_MASK(wp++)] = '\r';
+ }
+
+ __xencons_tx_flush();
+
+ spin_unlock_irqrestore(&xencons_lock, flags);
}
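
kcons_write() uses free-running counters: wp and wc only ever increase,
wp - wc is the fill level, and WBUF_MASK() folds the counters into the
array. The wbuf_size - 1 headroom check leaves room for the '\r' that each
'\n' expands into. A minimal userspace model (buffer size and names are
illustrative):

#include <stdio.h>

#define BUFSZ 8                      /* must be a power of two */
#define MASK(i) ((i) & (BUFSZ - 1))

int main(void)
{
        char buf[BUFSZ];
        unsigned wp = 0, wc = 0;     /* producer / consumer counters */
        const char *s = "a\n";

        for (const char *p = s; *p; p++) {
                if (wp - wc >= BUFSZ - 1)
                        break;       /* leave room for a possible '\r' */
                if ((buf[MASK(wp++)] = *p) == '\n')
                        buf[MASK(wp++)] = '\r';
        }
        printf("queued %u bytes\n", wp - wc);  /* prints: queued 3 bytes */
        return 0;
}
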
static void kcons_write_dom0(
- struct console *c, const char *s, unsigned int count)
-{
- int rc;
-
- while ( (count > 0) &&
- ((rc = HYPERVISOR_console_io(
- CONSOLEIO_write, count, (char *)s)) > 0) )
- {
- count -= rc;
- s += rc;
- }
+ struct console *c, const char *s, unsigned int count)
+{
+ int rc;
+
+ while ((count > 0) &&
+ ((rc = HYPERVISOR_console_io(
+ CONSOLEIO_write, count, (char *)s)) > 0)) {
+ count -= rc;
+ s += rc;
+ }
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static struct tty_driver *kcons_device(struct console *c, int *index)
{
- *index = c->index;
- return xencons_driver;
+ *index = c->index;
+ return xencons_driver;
}
#else
static kdev_t kcons_device(struct console *c)
{
- return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1);
+ return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1);
}
#endif
static struct console kcons_info = {
- .device = kcons_device,
- .flags = CON_PRINTBUFFER,
- .index = -1,
+ .device = kcons_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
};
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
@@ -196,44 +196,42 @@
void xen_console_init(void)
#endif
{
- if ( xen_start_info->flags & SIF_INITDOMAIN )
- {
- if ( xc_mode == XC_DEFAULT )
- xc_mode = XC_SERIAL;
- kcons_info.write = kcons_write_dom0;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- if ( xc_mode == XC_SERIAL )
- kcons_info.flags |= CON_ENABLED;
-#endif
- }
- else
- {
- if ( xc_mode == XC_DEFAULT )
- xc_mode = XC_TTY;
- kcons_info.write = kcons_write;
- }
-
- switch ( xc_mode )
- {
- case XC_SERIAL:
- strcpy(kcons_info.name, "ttyS");
- if ( xc_num == -1 ) xc_num = 0;
- break;
-
- case XC_TTY:
- strcpy(kcons_info.name, "tty");
- if ( xc_num == -1 ) xc_num = 1;
- break;
-
- default:
- return __RETCODE;
- }
-
- wbuf = alloc_bootmem(wbuf_size);
-
- register_console(&kcons_info);
-
- return __RETCODE;
+ if (xen_start_info->flags & SIF_INITDOMAIN) {
+ if (xc_mode == XC_DEFAULT)
+ xc_mode = XC_SERIAL;
+ kcons_info.write = kcons_write_dom0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ if (xc_mode == XC_SERIAL)
+ kcons_info.flags |= CON_ENABLED;
+#endif
+ } else {
+ if (xc_mode == XC_DEFAULT)
+ xc_mode = XC_TTY;
+ kcons_info.write = kcons_write;
+ }
+
+ switch (xc_mode) {
+ case XC_SERIAL:
+ strcpy(kcons_info.name, "ttyS");
+ if (xc_num == -1)
+ xc_num = 0;
+ break;
+
+ case XC_TTY:
+ strcpy(kcons_info.name, "tty");
+ if (xc_num == -1)
+ xc_num = 1;
+ break;
+
+ default:
+ return __RETCODE;
+ }
+
+ wbuf = alloc_bootmem(wbuf_size);
+
+ register_console(&kcons_info);
+
+ return __RETCODE;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
console_initcall(xen_console_init);
@@ -246,41 +244,40 @@
asmlinkage int xprintk(const char *fmt, ...)
#endif
{
- va_list args;
- int printk_len;
- static char printk_buf[1024];
+ va_list args;
+ int printk_len;
+ static char printk_buf[1024];
- /* Emit the output into the temporary buffer */
- va_start(args, fmt);
- printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
- va_end(args);
-
- /* Send the processed output directly to Xen. */
- kcons_write_dom0(NULL, printk_buf, printk_len);
-
- return 0;
+ /* Emit the output into the temporary buffer */
+ va_start(args, fmt);
+ printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
+ va_end(args);
+
+ /* Send the processed output directly to Xen. */
+ kcons_write_dom0(NULL, printk_buf, printk_len);
+
+ return 0;
}
/*** Forcibly flush console data before dying. ***/
void xencons_force_flush(void)
{
- int sz;
-
- /* Emergency console is synchronous, so there's nothing to flush. */
- if ( xen_start_info->flags & SIF_INITDOMAIN )
- return;
-
-
- /* Spin until console data is flushed through to the domain controller. */
- while ( (wc != wp) )
- {
- int sent = 0;
- if ( (sz = wp - wc) == 0 )
- continue;
- sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
- if (sent > 0)
- wc += sent;
- }
+ int sz;
+
+ /* Emergency console is synchronous, so there's nothing to flush. */
+ if (xen_start_info->flags & SIF_INITDOMAIN)
+ return;
+
+
+ /* Spin until console data is flushed through to the daemon. */
+ while (wc != wp) {
+ int sent = 0;
+ if ((sz = wp - wc) == 0)
+ continue;
+ sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
+ if (sent > 0)
+ wc += sent;
+ }
}
@@ -305,362 +302,358 @@
/* Non-privileged receive callback. */
static void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
{
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&xencons_lock, flags);
- if ( xencons_tty != NULL )
- {
- for ( i = 0; i < len; i++ ) {
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+ if (xencons_tty == NULL)
+ goto out;
+
+ for (i = 0; i < len; i++) {
#ifdef CONFIG_MAGIC_SYSRQ
- if (sysrq_enabled) {
- if (buf[i] == '\x0f') { /* ^O */
- sysrq_requested = jiffies;
- continue; /* don't print the sysrq key */
- } else if (sysrq_requested) {
- unsigned long sysrq_timeout = sysrq_requested + HZ*2;
- sysrq_requested = 0;
- /* if it's been less than a timeout, do the sysrq */
- if (time_before(jiffies, sysrq_timeout)) {
- spin_unlock_irqrestore(&xencons_lock, flags);
- handle_sysrq(buf[i], regs, xencons_tty);
- spin_lock_irqsave(&xencons_lock, flags);
- continue;
- }
- }
- }
-#endif
- tty_insert_flip_char(xencons_tty, buf[i], 0);
- }
- tty_flip_buffer_push(xencons_tty);
- }
- spin_unlock_irqrestore(&xencons_lock, flags);
-
+ if (sysrq_enabled) {
+ if (buf[i] == '\x0f') { /* ^O */
+ sysrq_requested = jiffies;
+ continue; /* don't print the sysrq key */
+ } else if (sysrq_requested) {
+ unsigned long sysrq_timeout =
+ sysrq_requested + HZ*2;
+ sysrq_requested = 0;
+ if (time_before(jiffies, sysrq_timeout)) {
+ spin_unlock_irqrestore(
+ &xencons_lock, flags);
+ handle_sysrq(
+ buf[i], regs, xencons_tty);
+ spin_lock_irqsave(
+ &xencons_lock, flags);
+ continue;
+ }
+ }
+ }
+#endif
+ tty_insert_flip_char(xencons_tty, buf[i], 0);
+ }
+ tty_flip_buffer_push(xencons_tty);
+
+ out:
+ spin_unlock_irqrestore(&xencons_lock, flags);
}
/* Privileged and non-privileged transmit worker. */
static void __xencons_tx_flush(void)
{
- int sz, work_done = 0;
-
- if ( xen_start_info->flags & SIF_INITDOMAIN )
- {
- if ( x_char )
- {
- kcons_write_dom0(NULL, &x_char, 1);
- x_char = 0;
- work_done = 1;
- }
-
- while ( wc != wp )
- {
- sz = wp - wc;
- if ( sz > (wbuf_size - WBUF_MASK(wc)) )
- sz = wbuf_size - WBUF_MASK(wc);
- kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
- wc += sz;
- work_done = 1;
- }
- }
- else
- {
- while ( x_char )
- {
- if (xencons_ring_send(&x_char, 1) == 1) {
- x_char = 0;
- work_done = 1;
- }
- }
-
- while ( wc != wp )
- {
- int sent;
- sz = wp - wc;
- if ( sz > (wbuf_size - WBUF_MASK(wc)) )
- sz = wbuf_size - WBUF_MASK(wc);
- sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
- if ( sent > 0 ) {
- wc += sent;
- work_done = 1;
- }
- }
- }
-
- if ( work_done && (xencons_tty != NULL) )
- {
- wake_up_interruptible(&xencons_tty->write_wait);
- if ( (xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- (xencons_tty->ldisc.write_wakeup != NULL) )
- (xencons_tty->ldisc.write_wakeup)(xencons_tty);
- }
+ int sz, work_done = 0;
+
+ if (xen_start_info->flags & SIF_INITDOMAIN) {
+ if (x_char) {
+ kcons_write_dom0(NULL, &x_char, 1);
+ x_char = 0;
+ work_done = 1;
+ }
+
+ while (wc != wp) {
+ sz = wp - wc;
+ if (sz > (wbuf_size - WBUF_MASK(wc)))
+ sz = wbuf_size - WBUF_MASK(wc);
+ kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
+ wc += sz;
+ work_done = 1;
+ }
+ } else {
+ while (x_char) {
+ if (xencons_ring_send(&x_char, 1) == 1) {
+ x_char = 0;
+ work_done = 1;
+ }
+ }
+
+ while (wc != wp) {
+ int sent;
+ sz = wp - wc;
+ if (sz > (wbuf_size - WBUF_MASK(wc)))
+ sz = wbuf_size - WBUF_MASK(wc);
+ sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
+ if (sent > 0) {
+ wc += sent;
+ work_done = 1;
+ }
+ }
+ }
+
+ if (work_done && (xencons_tty != NULL)) {
+ wake_up_interruptible(&xencons_tty->write_wait);
+ if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ (xencons_tty->ldisc.write_wakeup != NULL))
+ (xencons_tty->ldisc.write_wakeup)(xencons_tty);
+ }
}
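
The drain loops in __xencons_tx_flush() never send across the wrap point:
each pass clamps the chunk to the bytes between WBUF_MASK(wc) and the end
of the buffer, so a wrapped region goes out as two consecutive sends. The
clamp in isolation (illustrative sizes):

#include <assert.h>

#define BUFSZ 16
#define MASK(i) ((i) & (BUFSZ - 1))

static unsigned chunk_len(unsigned wc, unsigned wp)
{
        unsigned sz = wp - wc;
        if (sz > BUFSZ - MASK(wc))
                sz = BUFSZ - MASK(wc);  /* stop at the wrap point */
        return sz;
}

int main(void)
{
        /* 6 bytes queued, 2 before the wrap: first chunk is 2 bytes. */
        assert(chunk_len(14, 20) == 2);
        return 0;
}
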
/* Privileged receive callback and transmit kicker. */
static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
struct pt_regs *regs)
{
- static char rbuf[16];
- int i, l;
- unsigned long flags;
-
- spin_lock_irqsave(&xencons_lock, flags);
-
- if ( xencons_tty != NULL )
- {
- /* Receive work. */
- while ( (l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0 )
- for ( i = 0; i < l; i++ )
- tty_insert_flip_char(xencons_tty, rbuf[i], 0);
- if ( xencons_tty->flip.count != 0 )
- tty_flip_buffer_push(xencons_tty);
- }
-
- /* Transmit work. */
- __xencons_tx_flush();
-
- spin_unlock_irqrestore(&xencons_lock, flags);
-
- return IRQ_HANDLED;
+ static char rbuf[16];
+ int i, l;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+
+ if (xencons_tty != NULL) {
+ /* Receive work. */
+ while ((l = HYPERVISOR_console_io(
+ CONSOLEIO_read, 16, rbuf)) > 0)
+ for (i = 0; i < l; i++)
+ tty_insert_flip_char(xencons_tty, rbuf[i], 0);
+ if (xencons_tty->flip.count != 0)
+ tty_flip_buffer_push(xencons_tty);
+ }
+
+ /* Transmit work. */
+ __xencons_tx_flush();
+
+ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return IRQ_HANDLED;
}
static int xencons_write_room(struct tty_struct *tty)
{
- return wbuf_size - (wp - wc);
+ return wbuf_size - (wp - wc);
}
static int xencons_chars_in_buffer(struct tty_struct *tty)
{
- return wp - wc;
+ return wp - wc;
}
static void xencons_send_xchar(struct tty_struct *tty, char ch)
{
- unsigned long flags;
-
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- spin_lock_irqsave(&xencons_lock, flags);
- x_char = ch;
- __xencons_tx_flush();
- spin_unlock_irqrestore(&xencons_lock, flags);
+ unsigned long flags;
+
+ if (TTY_INDEX(tty) != 0)
+ return;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+ x_char = ch;
+ __xencons_tx_flush();
+ spin_unlock_irqrestore(&xencons_lock, flags);
}
static void xencons_throttle(struct tty_struct *tty)
{
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- if ( I_IXOFF(tty) )
- xencons_send_xchar(tty, STOP_CHAR(tty));
+ if (TTY_INDEX(tty) != 0)
+ return;
+
+ if (I_IXOFF(tty))
+ xencons_send_xchar(tty, STOP_CHAR(tty));
}
static void xencons_unthrottle(struct tty_struct *tty)
{
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- if ( I_IXOFF(tty) )
- {
- if ( x_char != 0 )
- x_char = 0;
- else
- xencons_send_xchar(tty, START_CHAR(tty));
- }
+ if (TTY_INDEX(tty) != 0)
+ return;
+
+ if (I_IXOFF(tty)) {
+ if (x_char != 0)
+ x_char = 0;
+ else
+ xencons_send_xchar(tty, START_CHAR(tty));
+ }
}
static void xencons_flush_buffer(struct tty_struct *tty)
{
- unsigned long flags;
-
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- spin_lock_irqsave(&xencons_lock, flags);
- wc = wp = 0;
- spin_unlock_irqrestore(&xencons_lock, flags);
+ unsigned long flags;
+
+ if (TTY_INDEX(tty) != 0)
+ return;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+ wc = wp = 0;
+ spin_unlock_irqrestore(&xencons_lock, flags);
}
static inline int __xencons_put_char(int ch)
{
- char _ch = (char)ch;
- if ( (wp - wc) == wbuf_size )
- return 0;
- wbuf[WBUF_MASK(wp++)] = _ch;
- return 1;
+ char _ch = (char)ch;
+ if ((wp - wc) == wbuf_size)
+ return 0;
+ wbuf[WBUF_MASK(wp++)] = _ch;
+ return 1;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static int xencons_write(
- struct tty_struct *tty,
- const unsigned char *buf,
- int count)
-{
- int i;
- unsigned long flags;
-
- if ( TTY_INDEX(tty) != 0 )
- return count;
-
- spin_lock_irqsave(&xencons_lock, flags);
-
- for ( i = 0; i < count; i++ )
- if ( !__xencons_put_char(buf[i]) )
- break;
-
- if ( i != 0 )
- __xencons_tx_flush();
-
- spin_unlock_irqrestore(&xencons_lock, flags);
-
- return i;
+ struct tty_struct *tty,
+ const unsigned char *buf,
+ int count)
+{
+ int i;
+ unsigned long flags;
+
+ if (TTY_INDEX(tty) != 0)
+ return count;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+
+ for (i = 0; i < count; i++)
+ if (!__xencons_put_char(buf[i]))
+ break;
+
+ if (i != 0)
+ __xencons_tx_flush();
+
+ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return i;
}
#else
static int xencons_write(
- struct tty_struct *tty,
- int from_user,
- const u_char *buf,
- int count)
-{
- int i;
- unsigned long flags;
-
- if ( from_user && verify_area(VERIFY_READ, buf, count) )
- return -EINVAL;
-
- if ( TTY_INDEX(tty) != 0 )
- return count;
-
- spin_lock_irqsave(&xencons_lock, flags);
-
- for ( i = 0; i < count; i++ )
- {
- char ch;
- if ( from_user )
- __get_user(ch, buf + i);
- else
- ch = buf[i];
- if ( !__xencons_put_char(ch) )
- break;
- }
-
- if ( i != 0 )
- __xencons_tx_flush();
-
- spin_unlock_irqrestore(&xencons_lock, flags);
-
- return i;
+ struct tty_struct *tty,
+ int from_user,
+ const u_char *buf,
+ int count)
+{
+ int i;
+ unsigned long flags;
+
+ if (from_user && verify_area(VERIFY_READ, buf, count))
+ return -EINVAL;
+
+ if (TTY_INDEX(tty) != 0)
+ return count;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+
+ for (i = 0; i < count; i++) {
+ char ch;
+ if (from_user)
+ __get_user(ch, buf + i);
+ else
+ ch = buf[i];
+ if (!__xencons_put_char(ch))
+ break;
+ }
+
+ if (i != 0)
+ __xencons_tx_flush();
+
+ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return i;
}
#endif
static void xencons_put_char(struct tty_struct *tty, u_char ch)
{
- unsigned long flags;
-
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- spin_lock_irqsave(&xencons_lock, flags);
- (void)__xencons_put_char(ch);
- spin_unlock_irqrestore(&xencons_lock, flags);
+ unsigned long flags;
+
+ if (TTY_INDEX(tty) != 0)
+ return;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+ (void)__xencons_put_char(ch);
+ spin_unlock_irqrestore(&xencons_lock, flags);
}
static void xencons_flush_chars(struct tty_struct *tty)
{
- unsigned long flags;
-
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- spin_lock_irqsave(&xencons_lock, flags);
- __xencons_tx_flush();
- spin_unlock_irqrestore(&xencons_lock, flags);
+ unsigned long flags;
+
+ if (TTY_INDEX(tty) != 0)
+ return;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+ __xencons_tx_flush();
+ spin_unlock_irqrestore(&xencons_lock, flags);
}
static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
{
- unsigned long orig_jiffies = jiffies;
-
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- while ( DRV(tty->driver)->chars_in_buffer(tty) )
- {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- if ( signal_pending(current) )
- break;
- if ( (timeout != 0) && time_after(jiffies, orig_jiffies + timeout) )
- break;
- }
+ unsigned long orig_jiffies = jiffies;
+
+ if (TTY_INDEX(tty) != 0)
+ return;
+
+ while (DRV(tty->driver)->chars_in_buffer(tty)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ if (signal_pending(current))
+ break;
+ if ((timeout != 0) &&
+ time_after(jiffies, orig_jiffies + timeout))
+ break;
+ }
- set_current_state(TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
}
static int xencons_open(struct tty_struct *tty, struct file *filp)
{
- unsigned long flags;
-
- if ( TTY_INDEX(tty) != 0 )
- return 0;
-
- spin_lock_irqsave(&xencons_lock, flags);
- tty->driver_data = NULL;
- if ( xencons_tty == NULL )
- xencons_tty = tty;
- __xencons_tx_flush();
- spin_unlock_irqrestore(&xencons_lock, flags);
-
- return 0;
+ unsigned long flags;
+
+ if (TTY_INDEX(tty) != 0)
+ return 0;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+ tty->driver_data = NULL;
+ if (xencons_tty == NULL)
+ xencons_tty = tty;
+ __xencons_tx_flush();
+ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return 0;
}
static void xencons_close(struct tty_struct *tty, struct file *filp)
{
- unsigned long flags;
-
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- if ( tty->count == 1 )
- {
- tty->closing = 1;
- tty_wait_until_sent(tty, 0);
- if ( DRV(tty->driver)->flush_buffer != NULL )
- DRV(tty->driver)->flush_buffer(tty);
- if ( tty->ldisc.flush_buffer != NULL )
- tty->ldisc.flush_buffer(tty);
- tty->closing = 0;
- spin_lock_irqsave(&xencons_lock, flags);
- xencons_tty = NULL;
- spin_unlock_irqrestore(&xencons_lock, flags);
- }
+ unsigned long flags;
+
+ if (TTY_INDEX(tty) != 0)
+ return;
+
+ if (tty->count == 1) {
+ tty->closing = 1;
+ tty_wait_until_sent(tty, 0);
+ if (DRV(tty->driver)->flush_buffer != NULL)
+ DRV(tty->driver)->flush_buffer(tty);
+ if (tty->ldisc.flush_buffer != NULL)
+ tty->ldisc.flush_buffer(tty);
+ tty->closing = 0;
+ spin_lock_irqsave(&xencons_lock, flags);
+ xencons_tty = NULL;
+ spin_unlock_irqrestore(&xencons_lock, flags);
+ }
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static struct tty_operations xencons_ops = {
- .open = xencons_open,
- .close = xencons_close,
- .write = xencons_write,
- .write_room = xencons_write_room,
- .put_char = xencons_put_char,
- .flush_chars = xencons_flush_chars,
- .chars_in_buffer = xencons_chars_in_buffer,
- .send_xchar = xencons_send_xchar,
- .flush_buffer = xencons_flush_buffer,
- .throttle = xencons_throttle,
- .unthrottle = xencons_unthrottle,
- .wait_until_sent = xencons_wait_until_sent,
+ .open = xencons_open,
+ .close = xencons_close,
+ .write = xencons_write,
+ .write_room = xencons_write_room,
+ .put_char = xencons_put_char,
+ .flush_chars = xencons_flush_chars,
+ .chars_in_buffer = xencons_chars_in_buffer,
+ .send_xchar = xencons_send_xchar,
+ .flush_buffer = xencons_flush_buffer,
+ .throttle = xencons_throttle,
+ .unthrottle = xencons_unthrottle,
+ .wait_until_sent = xencons_wait_until_sent,
};
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
static const char *xennullcon_startup(void)
{
- return NULL;
+ return NULL;
}
static int xennullcon_dummy(void)
{
- return 0;
+ return 0;
}
#define DUMMY (void *)xennullcon_dummy
@@ -672,122 +665,128 @@
*/
const struct consw xennull_con = {
- .owner = THIS_MODULE,
- .con_startup = xennullcon_startup,
- .con_init = DUMMY,
- .con_deinit = DUMMY,
- .con_clear = DUMMY,
- .con_putc = DUMMY,
- .con_putcs = DUMMY,
- .con_cursor = DUMMY,
- .con_scroll = DUMMY,
- .con_bmove = DUMMY,
- .con_switch = DUMMY,
- .con_blank = DUMMY,
- .con_font_set = DUMMY,
- .con_font_get = DUMMY,
- .con_font_default = DUMMY,
- .con_font_copy = DUMMY,
- .con_set_palette = DUMMY,
- .con_scrolldelta = DUMMY,
+ .owner = THIS_MODULE,
+ .con_startup = xennullcon_startup,
+ .con_init = DUMMY,
+ .con_deinit = DUMMY,
+ .con_clear = DUMMY,
+ .con_putc = DUMMY,
+ .con_putcs = DUMMY,
+ .con_cursor = DUMMY,
+ .con_scroll = DUMMY,
+ .con_bmove = DUMMY,
+ .con_switch = DUMMY,
+ .con_blank = DUMMY,
+ .con_font_set = DUMMY,
+ .con_font_get = DUMMY,
+ .con_font_default = DUMMY,
+ .con_font_copy = DUMMY,
+ .con_set_palette = DUMMY,
+ .con_scrolldelta = DUMMY,
};
#endif
#endif
static int __init xencons_init(void)
{
- int rc;
-
- if ( xc_mode == XC_OFF )
- return 0;
-
- xencons_ring_init();
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ?
- 1 : MAX_NR_CONSOLES);
- if ( xencons_driver == NULL )
- return -ENOMEM;
+ int rc;
+
+ if (xc_mode == XC_OFF)
+ return 0;
+
+ xencons_ring_init();
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ?
+ 1 : MAX_NR_CONSOLES);
+ if (xencons_driver == NULL)
+ return -ENOMEM;
#else
- memset(&xencons_driver, 0, sizeof(struct tty_driver));
- xencons_driver.magic = TTY_DRIVER_MAGIC;
- xencons_driver.refcount = &xencons_refcount;
- xencons_driver.table = xencons_table;
- xencons_driver.num = (xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES;
-#endif
-
- DRV(xencons_driver)->major = TTY_MAJOR;
- DRV(xencons_driver)->type = TTY_DRIVER_TYPE_SERIAL;
- DRV(xencons_driver)->subtype = SERIAL_TYPE_NORMAL;
- DRV(xencons_driver)->init_termios = tty_std_termios;
- DRV(xencons_driver)->flags =
- TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS;
- DRV(xencons_driver)->termios = xencons_termios;
- DRV(xencons_driver)->termios_locked = xencons_termios_locked;
-
- if ( xc_mode == XC_SERIAL )
- {
- DRV(xencons_driver)->name = "ttyS";
- DRV(xencons_driver)->minor_start = 64 + xc_num;
- DRV(xencons_driver)->name_base = 0 + xc_num;
- }
- else
- {
- DRV(xencons_driver)->name = "tty";
- DRV(xencons_driver)->minor_start = xc_num;
- DRV(xencons_driver)->name_base = xc_num;
- }
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- tty_set_operations(xencons_driver, &xencons_ops);
+ memset(&xencons_driver, 0, sizeof(struct tty_driver));
+ xencons_driver.magic = TTY_DRIVER_MAGIC;
+ xencons_driver.refcount = &xencons_refcount;
+ xencons_driver.table = xencons_table;
+ xencons_driver.num =
+ (xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES;
+#endif
+
+ DRV(xencons_driver)->major = TTY_MAJOR;
+ DRV(xencons_driver)->type = TTY_DRIVER_TYPE_SERIAL;
+ DRV(xencons_driver)->subtype = SERIAL_TYPE_NORMAL;
+ DRV(xencons_driver)->init_termios = tty_std_termios;
+ DRV(xencons_driver)->flags =
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_RESET_TERMIOS |
+ TTY_DRIVER_NO_DEVFS;
+ DRV(xencons_driver)->termios = xencons_termios;
+ DRV(xencons_driver)->termios_locked = xencons_termios_locked;
+
+ if (xc_mode == XC_SERIAL) {
+ DRV(xencons_driver)->name = "ttyS";
+ DRV(xencons_driver)->minor_start = 64 + xc_num;
+ DRV(xencons_driver)->name_base = 0 + xc_num;
+ } else {
+ DRV(xencons_driver)->name = "tty";
+ DRV(xencons_driver)->minor_start = xc_num;
+ DRV(xencons_driver)->name_base = xc_num;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ tty_set_operations(xencons_driver, &xencons_ops);
#else
- xencons_driver.open = xencons_open;
- xencons_driver.close = xencons_close;
- xencons_driver.write = xencons_write;
- xencons_driver.write_room = xencons_write_room;
- xencons_driver.put_char = xencons_put_char;
- xencons_driver.flush_chars = xencons_flush_chars;
- xencons_driver.chars_in_buffer = xencons_chars_in_buffer;
- xencons_driver.send_xchar = xencons_send_xchar;
- xencons_driver.flush_buffer = xencons_flush_buffer;
- xencons_driver.throttle = xencons_throttle;
- xencons_driver.unthrottle = xencons_unthrottle;
- xencons_driver.wait_until_sent = xencons_wait_until_sent;
-#endif
-
- if ( (rc = tty_register_driver(DRV(xencons_driver))) != 0 )
- {
- printk("WARNING: Failed to register Xen virtual "
- "console driver as '%s%d'\n",
- DRV(xencons_driver)->name, DRV(xencons_driver)->name_base);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- put_tty_driver(xencons_driver);
- xencons_driver = NULL;
-#endif
- return rc;
- }
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- tty_register_device(xencons_driver, 0, NULL);
-#endif
-
- if ( xen_start_info->flags & SIF_INITDOMAIN )
- {
- xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
- (void)request_irq(xencons_priv_irq,
- xencons_priv_interrupt, 0, "console", NULL);
- }
- else
- {
-
- xencons_ring_register_receiver(xencons_rx);
- }
-
- printk("Xen virtual console successfully installed as %s%d\n",
- DRV(xencons_driver)->name,
- DRV(xencons_driver)->name_base );
+ xencons_driver.open = xencons_open;
+ xencons_driver.close = xencons_close;
+ xencons_driver.write = xencons_write;
+ xencons_driver.write_room = xencons_write_room;
+ xencons_driver.put_char = xencons_put_char;
+ xencons_driver.flush_chars = xencons_flush_chars;
+ xencons_driver.chars_in_buffer = xencons_chars_in_buffer;
+ xencons_driver.send_xchar = xencons_send_xchar;
+ xencons_driver.flush_buffer = xencons_flush_buffer;
+ xencons_driver.throttle = xencons_throttle;
+ xencons_driver.unthrottle = xencons_unthrottle;
+ xencons_driver.wait_until_sent = xencons_wait_until_sent;
+#endif
+
+ if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
+ printk("WARNING: Failed to register Xen virtual "
+ "console driver as '%s%d'\n",
+ DRV(xencons_driver)->name, DRV(xencons_driver)->name_base);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ put_tty_driver(xencons_driver);
+ xencons_driver = NULL;
+#endif
+ return rc;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ tty_register_device(xencons_driver, 0, NULL);
+#endif
+
+ if (xen_start_info->flags & SIF_INITDOMAIN) {
+ xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
+ (void)request_irq(xencons_priv_irq,
+ xencons_priv_interrupt, 0, "console", NULL);
+ } else {
+ xencons_ring_register_receiver(xencons_rx);
+ }
+
+ printk("Xen virtual console successfully installed as %s%d\n",
+ DRV(xencons_driver)->name,
+ DRV(xencons_driver)->name_base);
- return 0;
+ return 0;
}
module_init(xencons_init);
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c
--- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c Thu Sep 22 13:04:14 2005
@@ -125,3 +125,13 @@
(void)xencons_ring_init();
}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h
--- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h Thu Sep 22 13:04:14 2005
@@ -3,12 +3,21 @@
asmlinkage int xprintk(const char *fmt, ...);
-
int xencons_ring_init(void);
int xencons_ring_send(const char *data, unsigned len);
-typedef void (xencons_receiver_func)(char *buf, unsigned len,
- struct pt_regs *regs);
+typedef void (xencons_receiver_func)(
+ char *buf, unsigned len, struct pt_regs *regs);
void xencons_ring_register_receiver(xencons_receiver_func *f);
#endif /* _XENCONS_RING_H */
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c
--- a/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c Thu Sep 22 13:04:14 2005
@@ -1,9 +1,9 @@
/******************************************************************************
* evtchn.c
*
- * Xenolinux driver for receiving and demuxing event-channel signals.
- *
- * Copyright (c) 2004, K A Fraser
+ * Driver for receiving and demuxing event-channel signals.
+ *
+ * Copyright (c) 2004-2005, K A Fraser
* Multi-process extensions Copyright (c) 2004, Steven Smith
*
* This file may be distributed separately from the Linux kernel, or
@@ -46,29 +46,18 @@
#include <linux/init.h>
#define XEN_EVTCHN_MASK_OPS
#include <asm-xen/evtchn.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#include <linux/devfs_fs_kernel.h>
-#define OLD_DEVFS
-#else
#include <linux/gfp.h>
-#endif
-
-#ifdef OLD_DEVFS
-/* NB. This must be shared amongst drivers if more things go in /dev/xen */
-static devfs_handle_t xen_dev_dir;
-#endif
struct per_user_data {
- /* Notification ring, accessed via /dev/xen/evtchn. */
-# define EVTCHN_RING_SIZE 2048 /* 2048 16-bit entries */
-# define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
- u16 *ring;
- unsigned int ring_cons, ring_prod, ring_overflow;
-
- /* Processes wait on this queue when ring is empty. */
- wait_queue_head_t evtchn_wait;
- struct fasync_struct *evtchn_async_queue;
+ /* Notification ring, accessed via /dev/xen/evtchn. */
+#define EVTCHN_RING_SIZE 2048 /* 2048 16-bit entries */
+#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
+ u16 *ring;
+ unsigned int ring_cons, ring_prod, ring_overflow;
+
+ /* Processes wait on this queue when ring is empty. */
+ wait_queue_head_t evtchn_wait;
+ struct fasync_struct *evtchn_async_queue;
};
/* Who's bound to each port? */
@@ -77,356 +66,310 @@
void evtchn_device_upcall(int port)
{
- struct per_user_data *u;
-
- spin_lock(&port_user_lock);
-
- mask_evtchn(port);
- clear_evtchn(port);
-
- if ( (u = port_user[port]) != NULL )
- {
- if ( (u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE )
- {
- u->ring[EVTCHN_RING_MASK(u->ring_prod)] = (u16)port;
- if ( u->ring_cons == u->ring_prod++ )
- {
- wake_up_interruptible(&u->evtchn_wait);
- kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
- }
- }
- else
- {
- u->ring_overflow = 1;
- }
- }
-
- spin_unlock(&port_user_lock);
+ struct per_user_data *u;
+
+ spin_lock(&port_user_lock);
+
+ mask_evtchn(port);
+ clear_evtchn(port);
+
+ if ((u = port_user[port]) != NULL) {
+ if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
+ u->ring[EVTCHN_RING_MASK(u->ring_prod)] = (u16)port;
+ if (u->ring_cons == u->ring_prod++) {
+ wake_up_interruptible(&u->evtchn_wait);
+ kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
+ }
+ } else {
+ u->ring_overflow = 1;
+ }
+ }
+
+ spin_unlock(&port_user_lock);
}
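
The test u->ring_cons == u->ring_prod++ above wakes readers only on the
empty-to-non-empty transition; later entries find the reader already
runnable. The comparison modelled standalone (hypothetical helper name):

#include <assert.h>
#include <stdbool.h>

static bool push_and_should_wake(unsigned *cons, unsigned *prod)
{
        /* the enqueue happens here; compare before bumping the producer */
        return *cons == (*prod)++;
}

int main(void)
{
        unsigned cons = 5, prod = 5;
        assert(push_and_should_wake(&cons, &prod));   /* was empty: wake */
        assert(!push_and_should_wake(&cons, &prod));  /* had 1 entry: no */
        return 0;
}
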
static ssize_t evtchn_read(struct file *file, char *buf,
size_t count, loff_t *ppos)
{
- int rc;
- unsigned int c, p, bytes1 = 0, bytes2 = 0;
- DECLARE_WAITQUEUE(wait, current);
- struct per_user_data *u = file->private_data;
-
- add_wait_queue(&u->evtchn_wait, &wait);
-
- count &= ~1; /* even number of bytes */
-
- if ( count == 0 )
- {
- rc = 0;
- goto out;
- }
-
- if ( count > PAGE_SIZE )
- count = PAGE_SIZE;
-
- for ( ; ; )
- {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if ( (c = u->ring_cons) != (p = u->ring_prod) )
- break;
-
- if ( u->ring_overflow )
- {
- rc = -EFBIG;
- goto out;
- }
-
- if ( file->f_flags & O_NONBLOCK )
- {
- rc = -EAGAIN;
- goto out;
- }
-
- if ( signal_pending(current) )
- {
- rc = -ERESTARTSYS;
- goto out;
- }
-
- schedule();
- }
-
- /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
- if ( ((c ^ p) & EVTCHN_RING_SIZE) != 0 )
- {
- bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * sizeof(u16);
- bytes2 = EVTCHN_RING_MASK(p) * sizeof(u16);
- }
- else
- {
- bytes1 = (p - c) * sizeof(u16);
- bytes2 = 0;
- }
-
- /* Truncate chunks according to caller's maximum byte count. */
- if ( bytes1 > count )
- {
- bytes1 = count;
- bytes2 = 0;
- }
- else if ( (bytes1 + bytes2) > count )
- {
- bytes2 = count - bytes1;
- }
-
- if ( copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
- ((bytes2 != 0) && copy_to_user(&buf[bytes1], &u->ring[0], bytes2)) )
- {
- rc = -EFAULT;
- goto out;
- }
-
- u->ring_cons += (bytes1 + bytes2) / sizeof(u16);
-
- rc = bytes1 + bytes2;
+ int rc;
+ unsigned int c, p, bytes1 = 0, bytes2 = 0;
+ DECLARE_WAITQUEUE(wait, current);
+ struct per_user_data *u = file->private_data;
+
+ add_wait_queue(&u->evtchn_wait, &wait);
+
+ count &= ~1; /* even number of bytes */
+
+ if (count == 0) {
+ rc = 0;
+ goto out;
+ }
+
+ if (count > PAGE_SIZE)
+ count = PAGE_SIZE;
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if ((c = u->ring_cons) != (p = u->ring_prod))
+ break;
+
+ if (u->ring_overflow) {
+ rc = -EFBIG;
+ goto out;
+ }
+
+ if (file->f_flags & O_NONBLOCK) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (signal_pending(current)) {
+ rc = -ERESTARTSYS;
+ goto out;
+ }
+
+ schedule();
+ }
+
+ /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
+ if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
+ bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
+ sizeof(u16);
+ bytes2 = EVTCHN_RING_MASK(p) * sizeof(u16);
+ } else {
+ bytes1 = (p - c) * sizeof(u16);
+ bytes2 = 0;
+ }
+
+ /* Truncate chunks according to caller's maximum byte count. */
+ if (bytes1 > count) {
+ bytes1 = count;
+ bytes2 = 0;
+ } else if ((bytes1 + bytes2) > count) {
+ bytes2 = count - bytes1;
+ }
+
+ if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
+ ((bytes2 != 0) &&
+ copy_to_user(&buf[bytes1], &u->ring[0], bytes2))) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ u->ring_cons += (bytes1 + bytes2) / sizeof(u16);
+
+ rc = bytes1 + bytes2;
out:
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&u->evtchn_wait, &wait);
- return rc;
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&u->evtchn_wait, &wait);
+ return rc;
}
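
evtchn_read() detects a wrapped region by XORing the free-running indices:
when c and p differ in the EVTCHN_RING_SIZE bit, the pending entries
straddle the end of the ring and must be copied as two chunks. The same
computation standalone (stand-in names for the u16 ring):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define RING_SIZE 2048
#define RING_MASK(i) ((i) & (RING_SIZE - 1))

static void chunks(unsigned c, unsigned p, size_t *b1, size_t *b2)
{
        if ((c ^ p) & RING_SIZE) {   /* data wraps past the end */
                *b1 = (RING_SIZE - RING_MASK(c)) * sizeof(uint16_t);
                *b2 = RING_MASK(p) * sizeof(uint16_t);
        } else {
                *b1 = (p - c) * sizeof(uint16_t);
                *b2 = 0;
        }
}

int main(void)
{
        size_t b1, b2;
        chunks(2046, 2050, &b1, &b2);  /* 4 entries, 2 each side of wrap */
        assert(b1 == 4 && b2 == 4);
        return 0;
}
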
static ssize_t evtchn_write(struct file *file, const char *buf,
size_t count, loff_t *ppos)
{
- int rc, i;
- u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
- struct per_user_data *u = file->private_data;
-
- if ( kbuf == NULL )
- return -ENOMEM;
-
- count &= ~1; /* even number of bytes */
-
- if ( count == 0 )
- {
- rc = 0;
- goto out;
- }
-
- if ( count > PAGE_SIZE )
- count = PAGE_SIZE;
-
- if ( copy_from_user(kbuf, buf, count) != 0 )
- {
- rc = -EFAULT;
- goto out;
- }
-
- spin_lock_irq(&port_user_lock);
- for ( i = 0; i < (count/2); i++ )
- if ( (kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u) )
- unmask_evtchn(kbuf[i]);
- spin_unlock_irq(&port_user_lock);
-
- rc = count;
+ int rc, i;
+ u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
+ struct per_user_data *u = file->private_data;
+
+ if (kbuf == NULL)
+ return -ENOMEM;
+
+ count &= ~1; /* even number of bytes */
+
+ if (count == 0) {
+ rc = 0;
+ goto out;
+ }
+
+ if (count > PAGE_SIZE)
+ count = PAGE_SIZE;
+
+ if (copy_from_user(kbuf, buf, count) != 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ spin_lock_irq(&port_user_lock);
+ for (i = 0; i < (count/2); i++)
+ if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
+ unmask_evtchn(kbuf[i]);
+ spin_unlock_irq(&port_user_lock);
+
+ rc = count;
out:
- free_page((unsigned long)kbuf);
- return rc;
+ free_page((unsigned long)kbuf);
+ return rc;
}
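
evtchn_write() interprets the user buffer as an array of u16 port numbers
and unmasks only ports that are in range and bound to the calling file
handle. A minimal model of that filter (types and names are illustrative):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define NR_PORTS 16

static size_t filter_unmask(const uint16_t *req, size_t n,
                            const void *owner[], const void *me)
{
        size_t unmasked = 0;
        for (size_t i = 0; i < n; i++)
                if (req[i] < NR_PORTS && owner[req[i]] == me)
                        unmasked++;     /* unmask_evtchn(req[i]) here */
        return unmasked;
}

int main(void)
{
        int me;
        const void *owners[NR_PORTS] = { [3] = &me };
        uint16_t req[] = { 3, 7, 99 };  /* 7 unowned, 99 out of range */
        assert(filter_unmask(req, 3, owners, &me) == 1);
        return 0;
}
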
static int evtchn_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
- int rc = 0;
- struct per_user_data *u = file->private_data;
-
- spin_lock_irq(&port_user_lock);
+ int rc = 0;
+ struct per_user_data *u = file->private_data;
+
+ spin_lock_irq(&port_user_lock);
- switch ( cmd )
- {
- case EVTCHN_RESET:
- /* Initialise the ring to empty. Clear errors. */
- u->ring_cons = u->ring_prod = u->ring_overflow = 0;
- break;
-
- case EVTCHN_BIND:
- if ( arg >= NR_EVENT_CHANNELS )
- {
- rc = -EINVAL;
- }
- else if ( port_user[arg] != NULL )
- {
- rc = -EISCONN;
- }
- else
- {
- port_user[arg] = u;
- unmask_evtchn(arg);
- }
- break;
-
- case EVTCHN_UNBIND:
- if ( arg >= NR_EVENT_CHANNELS )
- {
- rc = -EINVAL;
- }
- else if ( port_user[arg] != u )
- {
- rc = -ENOTCONN;
- }
- else
- {
- port_user[arg] = NULL;
- mask_evtchn(arg);
- }
- break;
-
- default:
- rc = -ENOSYS;
- break;
- }
-
- spin_unlock_irq(&port_user_lock);
-
- return rc;
+ switch (cmd) {
+ case EVTCHN_RESET:
+ /* Initialise the ring to empty. Clear errors. */
+ u->ring_cons = u->ring_prod = u->ring_overflow = 0;
+ break;
+
+ case EVTCHN_BIND:
+ if (arg >= NR_EVENT_CHANNELS) {
+ rc = -EINVAL;
+ } else if (port_user[arg] != NULL) {
+ rc = -EISCONN;
+ } else {
+ port_user[arg] = u;
+ unmask_evtchn(arg);
+ }
+ break;
+
+ case EVTCHN_UNBIND:
+ if (arg >= NR_EVENT_CHANNELS) {
+ rc = -EINVAL;
+ } else if (port_user[arg] != u) {
+ rc = -ENOTCONN;
+ } else {
+ port_user[arg] = NULL;
+ mask_evtchn(arg);
+ }
+ break;
+
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+
+ spin_unlock_irq(&port_user_lock);
+
+ return rc;
}
static unsigned int evtchn_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = POLLOUT | POLLWRNORM;
- struct per_user_data *u = file->private_data;
-
- poll_wait(file, &u->evtchn_wait, wait);
- if ( u->ring_cons != u->ring_prod )
- mask |= POLLIN | POLLRDNORM;
- if ( u->ring_overflow )
- mask = POLLERR;
- return mask;
+ unsigned int mask = POLLOUT | POLLWRNORM;
+ struct per_user_data *u = file->private_data;
+
+ poll_wait(file, &u->evtchn_wait, wait);
+ if (u->ring_cons != u->ring_prod)
+ mask |= POLLIN | POLLRDNORM;
+ if (u->ring_overflow)
+ mask = POLLERR;
+ return mask;
}
static int evtchn_fasync(int fd, struct file *filp, int on)
{
- struct per_user_data *u = filp->private_data;
- return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
+ struct per_user_data *u = filp->private_data;
+ return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
}
static int evtchn_open(struct inode *inode, struct file *filp)
{
- struct per_user_data *u;
-
- if ( (u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL )
- return -ENOMEM;
-
- memset(u, 0, sizeof(*u));
- init_waitqueue_head(&u->evtchn_wait);
-
- if ( (u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL )
- {
- kfree(u);
- return -ENOMEM;
- }
-
- filp->private_data = u;
-
- return 0;
+ struct per_user_data *u;
+
+ if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+
+ memset(u, 0, sizeof(*u));
+ init_waitqueue_head(&u->evtchn_wait);
+
+ if ((u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL) {
+ kfree(u);
+ return -ENOMEM;
+ }
+
+ filp->private_data = u;
+
+ return 0;
}
static int evtchn_release(struct inode *inode, struct file *filp)
{
- int i;
- struct per_user_data *u = filp->private_data;
-
- spin_lock_irq(&port_user_lock);
-
- free_page((unsigned long)u->ring);
-
- for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
- {
- if ( port_user[i] == u )
- {
- port_user[i] = NULL;
- mask_evtchn(i);
- }
- }
-
- spin_unlock_irq(&port_user_lock);
-
- kfree(u);
-
- return 0;
+ int i;
+ struct per_user_data *u = filp->private_data;
+
+ spin_lock_irq(&port_user_lock);
+
+ free_page((unsigned long)u->ring);
+
+ for (i = 0; i < NR_EVENT_CHANNELS; i++) {
+ if (port_user[i] == u) {
+ port_user[i] = NULL;
+ mask_evtchn(i);
+ }
+ }
+
+ spin_unlock_irq(&port_user_lock);
+
+ kfree(u);
+
+ return 0;
}
static struct file_operations evtchn_fops = {
- .owner = THIS_MODULE,
- .read = evtchn_read,
- .write = evtchn_write,
- .ioctl = evtchn_ioctl,
- .poll = evtchn_poll,
- .fasync = evtchn_fasync,
- .open = evtchn_open,
- .release = evtchn_release,
+ .owner = THIS_MODULE,
+ .read = evtchn_read,
+ .write = evtchn_write,
+ .ioctl = evtchn_ioctl,
+ .poll = evtchn_poll,
+ .fasync = evtchn_fasync,
+ .open = evtchn_open,
+ .release = evtchn_release,
};
static struct miscdevice evtchn_miscdev = {
- .minor = EVTCHN_MINOR,
- .name = "evtchn",
- .fops = &evtchn_fops,
+ .minor = EVTCHN_MINOR,
+ .name = "evtchn",
+ .fops = &evtchn_fops,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- .devfs_name = "misc/evtchn",
+ .devfs_name = "misc/evtchn",
#endif
};
static int __init evtchn_init(void)
{
-#ifdef OLD_DEVFS
- devfs_handle_t symlink_handle;
- int pos;
- char link_dest[64];
-#endif
- int err;
-
- spin_lock_init(&port_user_lock);
- memset(port_user, 0, sizeof(port_user));
-
- /* (DEVFS) create '/dev/misc/evtchn'. */
- err = misc_register(&evtchn_miscdev);
- if ( err != 0 )
- {
- printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
- return err;
- }
-
-#ifdef OLD_DEVFS
- /* (DEVFS) create directory '/dev/xen'. */
- xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL);
-
- /* (DEVFS) &link_dest[pos] == '../misc/evtchn'. */
- pos = devfs_generate_path(evtchn_miscdev.devfs_handle,
- &link_dest[3],
- sizeof(link_dest) - 3);
- if ( pos >= 0 )
- strncpy(&link_dest[pos], "../", 3);
-
- /* (DEVFS) symlink '/dev/xen/evtchn' -> '../misc/evtchn'. */
- (void)devfs_mk_symlink(xen_dev_dir,
- "evtchn",
- DEVFS_FL_DEFAULT,
- &link_dest[pos],
- &symlink_handle,
- NULL);
-
- /* (DEVFS) automatically destroy the symlink with its destination. */
- devfs_auto_unregister(evtchn_miscdev.devfs_handle, symlink_handle);
-#endif
-
- printk("Event-channel device installed.\n");
-
- return 0;
+ int err;
+
+ spin_lock_init(&port_user_lock);
+ memset(port_user, 0, sizeof(port_user));
+
+ /* (DEVFS) create '/dev/misc/evtchn'. */
+ err = misc_register(&evtchn_miscdev);
+ if (err != 0) {
+ printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
+ return err;
+ }
+
+ printk("Event-channel device installed.\n");
+
+ return 0;
}
static void evtchn_cleanup(void)
{
- misc_deregister(&evtchn_miscdev);
+ misc_deregister(&evtchn_miscdev);
}
module_init(evtchn_init);
module_exit(evtchn_cleanup);
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c
--- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c Thu Sep 22 13:04:14 2005
@@ -41,232 +41,253 @@
static int privcmd_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long data)
{
- int ret = -ENOSYS;
-
- switch ( cmd )
- {
- case IOCTL_PRIVCMD_HYPERCALL:
- {
- privcmd_hypercall_t hypercall;
+ int ret = -ENOSYS;
+
+ switch (cmd) {
+ case IOCTL_PRIVCMD_HYPERCALL: {
+ privcmd_hypercall_t hypercall;
- if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
- return -EFAULT;
+ if (copy_from_user(&hypercall, (void *)data,
+ sizeof(hypercall)))
+ return -EFAULT;
#if defined(__i386__)
- __asm__ __volatile__ (
- "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
- "movl 4(%%eax),%%ebx ;"
- "movl 8(%%eax),%%ecx ;"
- "movl 12(%%eax),%%edx ;"
- "movl 16(%%eax),%%esi ;"
- "movl 20(%%eax),%%edi ;"
- "movl (%%eax),%%eax ;"
- TRAP_INSTR "; "
- "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
- : "=a" (ret) : "0" (&hypercall) : "memory" );
+ __asm__ __volatile__ (
+ "pushl %%ebx; pushl %%ecx; pushl %%edx; "
+ "pushl %%esi; pushl %%edi; "
+ "movl 4(%%eax),%%ebx ;"
+ "movl 8(%%eax),%%ecx ;"
+ "movl 12(%%eax),%%edx ;"
+ "movl 16(%%eax),%%esi ;"
+ "movl 20(%%eax),%%edi ;"
+ "movl (%%eax),%%eax ;"
+ TRAP_INSTR "; "
+ "popl %%edi; popl %%esi; popl %%edx; "
+ "popl %%ecx; popl %%ebx"
+ : "=a" (ret) : "0" (&hypercall) : "memory" );
#elif defined (__x86_64__)
- {
- long ign1, ign2, ign3;
- __asm__ __volatile__ (
- "movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR
- : "=a" (ret), "=D" (ign1), "=S" (ign2), "=d" (ign3)
- : "0" ((unsigned long)hypercall.op),
- "1" ((unsigned long)hypercall.arg[0]),
- "2" ((unsigned long)hypercall.arg[1]),
- "3" ((unsigned long)hypercall.arg[2]),
- "g" ((unsigned long)hypercall.arg[3]),
- "g" ((unsigned long)hypercall.arg[4])
- : "r11","rcx","r8","r10","memory");
- }
+ {
+ long ign1, ign2, ign3;
+ __asm__ __volatile__ (
+ "movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR
+ : "=a" (ret), "=D" (ign1),
+ "=S" (ign2), "=d" (ign3)
+ : "0" ((unsigned long)hypercall.op),
+ "1" ((unsigned long)hypercall.arg[0]),
+ "2" ((unsigned long)hypercall.arg[1]),
+ "3" ((unsigned long)hypercall.arg[2]),
+ "g" ((unsigned long)hypercall.arg[3]),
+ "g" ((unsigned long)hypercall.arg[4])
+ : "r11","rcx","r8","r10","memory");
+ }
#elif defined (__ia64__)
- __asm__ __volatile__ (
- ";; mov r14=%2; mov r15=%3; mov r16=%4; mov r17=%5; mov r18=%6;"
- "mov r2=%1; break 0x1000;; mov %0=r8 ;;"
- : "=r" (ret)
- : "r" (hypercall.op),
- "r" (hypercall.arg[0]),
- "r" (hypercall.arg[1]),
- "r" (hypercall.arg[2]),
- "r" (hypercall.arg[3]),
- "r" (hypercall.arg[4])
- : "r14","r15","r16","r17","r18","r2","r8","memory");
+ __asm__ __volatile__ (
+ ";; mov r14=%2; mov r15=%3; "
+ "mov r16=%4; mov r17=%5; mov r18=%6;"
+ "mov r2=%1; break 0x1000;; mov %0=r8 ;;"
+ : "=r" (ret)
+ : "r" (hypercall.op),
+ "r" (hypercall.arg[0]),
+ "r" (hypercall.arg[1]),
+ "r" (hypercall.arg[2]),
+ "r" (hypercall.arg[3]),
+ "r" (hypercall.arg[4])
+ : "r14","r15","r16","r17","r18","r2","r8","memory");
#endif
- }
- break;
+ }
+ break;
#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
- case IOCTL_PRIVCMD_MMAP:
- {
+ case IOCTL_PRIVCMD_MMAP: {
#define PRIVCMD_MMAP_SZ 32
- privcmd_mmap_t mmapcmd;
- privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
- int i, rc;
-
- if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
- return -EFAULT;
-
- p = mmapcmd.entry;
-
- for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
- {
- int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
- PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
-
-
- if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
- return -EFAULT;
+ privcmd_mmap_t mmapcmd;
+ privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
+ int i, rc;
+
+ if (copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)))
+ return -EFAULT;
+
+ p = mmapcmd.entry;
+
+ for (i = 0; i < mmapcmd.num;
+ i += PRIVCMD_MMAP_SZ, p += PRIVCMD_MMAP_SZ) {
+ int j, n = ((mmapcmd.num - i) > PRIVCMD_MMAP_SZ) ?
+ PRIVCMD_MMAP_SZ : (mmapcmd.num - i);
+
+ if (copy_from_user(&msg, p,
+ n*sizeof(privcmd_mmap_entry_t)))
+ return -EFAULT;
- for ( j = 0; j < n; j++ )
- {
- struct vm_area_struct *vma =
- find_vma( current->mm, msg[j].va );
-
- if ( !vma )
- return -EINVAL;
-
- if ( msg[j].va > PAGE_OFFSET )
- return -EINVAL;
-
- if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
- return -EINVAL;
-
- if ( (rc = direct_remap_pfn_range(vma,
- msg[j].va&PAGE_MASK,
- msg[j].mfn,
- msg[j].npages<<PAGE_SHIFT,
- vma->vm_page_prot,
- mmapcmd.dom)) < 0 )
- return rc;
- }
- }
- ret = 0;
- }
- break;
-
- case IOCTL_PRIVCMD_MMAPBATCH:
- {
- mmu_update_t u;
- privcmd_mmapbatch_t m;
- struct vm_area_struct *vma = NULL;
- unsigned long *p, addr;
- unsigned long mfn, ptep;
- int i;
-
- if ( copy_from_user(&m, (void *)data, sizeof(m)) )
- { ret = -EFAULT; goto batch_err; }
-
- vma = find_vma( current->mm, m.addr );
-
- if ( !vma )
- { ret = -EINVAL; goto batch_err; }
-
- if ( m.addr > PAGE_OFFSET )
- { ret = -EFAULT; goto batch_err; }
-
- if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
- { ret = -EFAULT; goto batch_err; }
-
- p = m.arr;
- addr = m.addr;
- for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
- {
- if ( get_user(mfn, p) )
- return -EFAULT;
-
- ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
- if (ret)
- goto batch_err;
-
- u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
- u.ptr = ptep;
-
- if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0) )
- put_user(0xF0000000 | mfn, p);
- }
-
- ret = 0;
- break;
-
- batch_err:
- printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n",
- ret, vma, m.addr, m.num, m.arr,
- vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
- break;
- }
- break;
+ for (j = 0; j < n; j++) {
+ struct vm_area_struct *vma =
+ find_vma(current->mm, msg[j].va);
+
+ if (!vma)
+ return -EINVAL;
+
+ if (msg[j].va > PAGE_OFFSET)
+ return -EINVAL;
+
+ if ((msg[j].va + (msg[j].npages << PAGE_SHIFT))
+ > vma->vm_end)
+ return -EINVAL;
+
+ if ((rc = direct_remap_pfn_range(
+ vma,
+ msg[j].va&PAGE_MASK,
+ msg[j].mfn,
+ msg[j].npages<<PAGE_SHIFT,
+ vma->vm_page_prot,
+ mmapcmd.dom)) < 0)
+ return rc;
+ }
+ }
+ ret = 0;
+ }
+ break;
+
+ case IOCTL_PRIVCMD_MMAPBATCH: {
+ mmu_update_t u;
+ privcmd_mmapbatch_t m;
+ struct vm_area_struct *vma = NULL;
+ unsigned long *p, addr;
+ unsigned long mfn, ptep;
+ int i;
+
+ if (copy_from_user(&m, (void *)data, sizeof(m))) {
+ ret = -EFAULT;
+ goto batch_err;
+ }
+
+ vma = find_vma(current->mm, m.addr);
+ if (!vma) {
+ ret = -EINVAL;
+ goto batch_err;
+ }
+
+ if (m.addr > PAGE_OFFSET) {
+ ret = -EFAULT;
+ goto batch_err;
+ }
+
+ if ((m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end) {
+ ret = -EFAULT;
+ goto batch_err;
+ }
+
+ p = m.arr;
+ addr = m.addr;
+ for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
+ if (get_user(mfn, p))
+ return -EFAULT;
+
+ ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
+ if (ret)
+ goto batch_err;
+
+ u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
+ u.ptr = ptep;
+
+ if (HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0)
+ put_user(0xF0000000 | mfn, p);
+ }
+
+ ret = 0;
+ break;
+
+ batch_err:
+ printk("batch_err ret=%d vma=%p addr=%lx "
+ "num=%d arr=%p %lx-%lx\n",
+ ret, vma, m.addr, m.num, m.arr,
+ vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
+ break;
+ }
+ break;
#endif
- case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
- {
- unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
- pgd_t *pgd = pgd_offset_k(m2pv);
- pud_t *pud = pud_offset(pgd, m2pv);
- pmd_t *pmd = pmd_offset(pud, m2pv);
- unsigned long m2p_start_mfn = (*(unsigned long *)pmd) >> PAGE_SHIFT;
- ret = put_user(m2p_start_mfn, (unsigned long *)data) ? -EFAULT: 0;
- }
- break;
-
- case IOCTL_PRIVCMD_INITDOMAIN_STORE:
- {
- extern int do_xenbus_probe(void*);
- unsigned long page;
-
- if (xen_start_info->store_evtchn != 0) {
- ret = xen_start_info->store_mfn;
- break;
- }
-
- /* Allocate page. */
- page = get_zeroed_page(GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
- break;
- }
-
- /* We don't refcnt properly, so set reserved on page.
- * (this allocation is permanent) */
- SetPageReserved(virt_to_page(page));
-
- /* Initial connect. Setup channel and page. */
- xen_start_info->store_evtchn = data;
- xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >>
- PAGE_SHIFT);
- ret = xen_start_info->store_mfn;
-
- /* We'll return then this will wait for daemon to answer */
- kthread_run(do_xenbus_probe, NULL, "xenbus_probe");
- }
- break;
-
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
+ case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN: {
+ unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
+ pgd_t *pgd = pgd_offset_k(m2pv);
+ pud_t *pud = pud_offset(pgd, m2pv);
+ pmd_t *pmd = pmd_offset(pud, m2pv);
+ unsigned long m2p_start_mfn =
+ (*(unsigned long *)pmd) >> PAGE_SHIFT;
+ ret = put_user(m2p_start_mfn, (unsigned long *)data) ?
+ -EFAULT : 0;
+ }
+ break;
+
+ case IOCTL_PRIVCMD_INITDOMAIN_STORE: {
+ extern int do_xenbus_probe(void*);
+ unsigned long page;
+
+ if (xen_start_info->store_evtchn != 0) {
+ ret = xen_start_info->store_mfn;
+ break;
+ }
+
+ /* Allocate page. */
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ /* We don't refcnt properly, so set reserved on page.
+ * (this allocation is permanent) */
+ SetPageReserved(virt_to_page(page));
+
+ /* Initial connect. Setup channel and page. */
+ xen_start_info->store_evtchn = data;
+ xen_start_info->store_mfn =
+ pfn_to_mfn(virt_to_phys((void *)page) >>
+ PAGE_SHIFT);
+ ret = xen_start_info->store_mfn;
+
+ /* We return now; the kthread below waits for the daemon's answer. */
+ kthread_run(do_xenbus_probe, NULL, "xenbus_probe");
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
}
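
Stepping back from the whitespace: IOCTL_PRIVCMD_HYPERCALL copies a privcmd_hypercall_t (an op plus an arg[5] array, as the asm operands show) and traps straight into Xen, returning the hypervisor's result in ret. A caller sketch, assuming the node created by privcmd_init below lives at /proc/xen/privcmd and that a header supplies the ioctl number and type:

	/* Sketch only: the proc path and header names are assumptions. */
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	long privcmd_hypercall2(unsigned long op, unsigned long a0, unsigned long a1)
	{
		privcmd_hypercall_t call = { .op = op, .arg = { a0, a1 } };
		int fd = open("/proc/xen/privcmd", O_RDONLY);	/* node is 0400 */
		long ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, (unsigned long)&call);
		close(fd);
		return ret;
	}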
static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
{
- /* DONTCOPY is essential for Xen as copy_page_range is broken. */
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
-
- return 0;
+ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
+
+ return 0;
}
static struct file_operations privcmd_file_ops = {
- .ioctl = privcmd_ioctl,
- .mmap = privcmd_mmap,
+ .ioctl = privcmd_ioctl,
+ .mmap = privcmd_mmap,
};
static int __init privcmd_init(void)
{
- privcmd_intf = create_xen_proc_entry("privcmd", 0400);
- if ( privcmd_intf != NULL )
- privcmd_intf->proc_fops = &privcmd_file_ops;
-
- return 0;
+ privcmd_intf = create_xen_proc_entry("privcmd", 0400);
+ if (privcmd_intf != NULL)
+ privcmd_intf->proc_fops = &privcmd_file_ops;
+
+ return 0;
}
__initcall(privcmd_init);
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
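
One behavioural subtlety in the MMAPBATCH handler above deserves a note: a frame the hypervisor refuses does not fail the ioctl; the corresponding slot in the caller's mfn array is instead rewritten as 0xF0000000 | mfn, so callers must rescan the array afterwards. A checking sketch (it assumes real mfns never carry that top nibble, which the error encoding itself relies on):

	/* Sketch: count frames Xen rejected during IOCTL_PRIVCMD_MMAPBATCH. */
	int privcmd_count_failed(const unsigned long *arr, int num)
	{
		int i, failed = 0;

		for (i = 0; i < num; i++)
			if ((arr[i] & 0xF0000000UL) == 0xF0000000UL)
				failed++;	/* slot was rewritten by the driver */
		return failed;
	}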
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/tpmback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h Thu Sep 22 13:04:14 2005
@@ -84,3 +84,13 @@
#define MMAP_VADDR(t,_req) ((t)->mmap_vstart + ((_req) * PAGE_SIZE))
#endif /* __TPMIF__BACKEND__COMMON_H__ */
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c Thu Sep 22 13:04:14 2005
@@ -1075,3 +1075,13 @@
}
__initcall(tpmback_init);
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c Thu Sep 22 13:04:14 2005
@@ -268,3 +268,13 @@
{
xenbus_register_backend(&tpmback);
}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c Thu Sep 22 13:04:14 2005
@@ -741,3 +741,13 @@
}
__initcall(tpmif_init);
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h
--- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h Thu Sep 22 13:04:14 2005
@@ -38,3 +38,13 @@
};
#endif
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/usbback/control.c
--- a/linux-2.6-xen-sparse/drivers/xen/usbback/control.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/usbback/control.c Thu Sep 22 13:04:14 2005
@@ -59,3 +59,13 @@
memcpy(cmsg.msg, &st, sizeof(st));
ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c Thu Sep 22 13:04:14 2005
@@ -231,3 +231,13 @@
unbind_evtchn_from_irqhandler(xen_start_info->store_evtchn, &xb_waitq);
}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h Thu Sep 22 13:04:14 2005
@@ -39,3 +39,13 @@
extern wait_queue_head_t xb_waitq;
#endif /* _XENBUS_COMMS_H */
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c Thu Sep 22 13:04:14 2005
@@ -186,3 +186,13 @@
}
__initcall(xenbus_dev_init);
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c Thu Sep 22 13:04:14 2005
@@ -687,3 +687,13 @@
}
postcore_initcall(xenbus_probe_init);
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 4cff74aa6246 -r d7c794130ac5 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c Thu Sep 22 13:01:01 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c Thu Sep 22 13:04:14 2005
@@ -566,3 +566,13 @@
return PTR_ERR(watcher);
return 0;
}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog