Re: [Xen-devel] [Patch 5/7] pvSCSI driver

To: Jun Kamada <kama@xxxxxxxxxxxxxx>
Subject: Re: [Xen-devel] [Patch 5/7] pvSCSI driver
From: Steven Smith <steven.smith@xxxxxxxxxxxxx>
Date: Wed, 27 Feb 2008 11:16:52 +0000
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx
In-reply-to: <20080218190630.E752.EB2C8575@xxxxxxxxxxxxxx>
References: <20080218190630.E752.EB2C8575@xxxxxxxxxxxxxx>
> diff -r 77e3c0d472ac -r 81e347dc1bfe drivers/xen/scsiback/comback.c
> --- /dev/null Thu Jan 01 00:00:00 1970 +0000
> +++ b/drivers/xen/scsiback/comback.c  Fri Feb 15 19:49:24 2008 +0900
...
> +#include <scsi/scsi.h>
> +#include <scsi/scsi_host.h>
> +#include <scsi/scsi_device.h>
> +#include "comback.h"
> +
> +extern struct list_head pending_free;
> +extern int vscsiif_reqs;
Is there any reason not to pull these into comback.h?
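(I.e. something like this in comback.h, alongside the other shared declarations:

	extern struct list_head pending_free;
	extern int vscsiif_reqs;

and similarly for the scsiback_cmd_exec()/copy_request_ring_info() prototypes a few lines down.)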

> +
> +static DEFINE_SPINLOCK(pending_free_lock);
> +static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
> +
> +extern void scsiback_cmd_exec(pending_req_t *);
> +extern int copy_request_ring_info(struct comback_info *,
> +             struct vscsiif_request *, pending_req_t *);
> +extern void scsiback_reset_exec(pending_req_t *);
> +
> +/* ------------------------------------------------------------ */
> +/*   for frontend to backend communication                   */
> +/* ------------------------------------------------------------ */
> +
> +static pending_req_t * alloc_req(void)
> +{
> +     pending_req_t *req = NULL;
> +     unsigned long flags;
> +
> +     spin_lock_irqsave(&pending_free_lock, flags);
> +     if (!list_empty(&pending_free)) {
> +             req = list_entry(pending_free.next, pending_req_t, free_list);
> +             list_del(&req->free_list);
> +     }
> +     spin_unlock_irqrestore(&pending_free_lock, flags);
> +     return req;
> +}
> +
> +void free_req(pending_req_t *req)
> +{
> +     unsigned long flags;
> +     int was_empty;
> +
> +     spin_lock_irqsave(&pending_free_lock, flags);
> +     was_empty = list_empty(&pending_free);
> +     list_add(&req->free_list, &pending_free);
> +     spin_unlock_irqrestore(&pending_free_lock, flags);
> +     if (was_empty)
> +             wake_up(&pending_free_wq);
> +}
> +
> +static void comback_notify_work(struct comback_info *info)
> +{
> +     info->waiting_reqs = 1;
> +     wake_up(&info->wq);
> +}
> +
> +irqreturn_t comback_intr(int irq, void *dev_id, struct pt_regs *regs)
> +{
> +     comback_notify_work((struct comback_info *)dev_id);
> +     return IRQ_HANDLED;
> +}
> +
> +static int do_comback_cmd_fn(struct comback_info *info)
> +{
> +     struct vscsiif_back_ring *ring = &info->ring;
> +     struct vscsiif_request  *ring_req;
> +
> +     pending_req_t *pending_req;
> +     RING_IDX rc, rp;
> +     int err;
> +     int more_to_do = 0;
> +
> +     DPRINTK("%s\n",__FUNCTION__);
> +
> +     rc = ring->req_cons;
> +     rp = ring->sring->req_prod;
> +     rmb();
> +
> +     while ((rc != rp)) {
> +             if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
> +                     break;
> +
> +             pending_req = alloc_req();
> +             if (NULL == pending_req) {
> +                     more_to_do = 1;
> +                     break;
> +             }
> +
> +             ring_req = RING_GET_REQUEST(ring, rc);
> +             ring->req_cons = ++rc;
> +
> +             err = copy_request_ring_info(info, ring_req, pending_req);
> +
> +             if (pending_req->cmd == VSCSIIF_CMND_SCSI) {
> +                     scsiback_cmd_exec(pending_req);
> +             } else if (pending_req->cmd == VSCSIIF_CMND_SCSI_RESET) {
> +                     scsiback_reset_exec(pending_req);
> +             }
> +     }
> +
Do you not need to do a RING_FINAL_CHECK_FOR_REQUESTS around here somewhere?
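For reference, the usual consumer-loop shape from xen/interface/io/ring.h, using this function's names (illustrative sketch only):

	do {
		rc = ring->req_cons;
		rp = ring->sring->req_prod;
		rmb();
		while (rc != rp && !RING_REQUEST_CONS_OVERFLOW(ring, rc)) {
			/* ... consume the request at rc ... */
			ring->req_cons = ++rc;
		}
		RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
	} while (more_to_do);

RING_FINAL_CHECK_FOR_REQUESTS is what keeps sring->req_event up to date, so the frontend knows when it actually needs to notify you.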


> +     return more_to_do;
> +}
> +
> +int comback_schedule(void *data)
> +{
> +     struct comback_info *info = (struct comback_info *)data;
> +
> +     DPRINTK("%s\n",__FUNCTION__);
> +
> +     scsiback_get(info);
> +
> +     while (!kthread_should_stop()) {
> +             wait_event_interruptible(
> +                     info->wq,
> +                     info->waiting_reqs || kthread_should_stop());
> +             wait_event_interruptible(
> +                     pending_free_wq,
> +                     !list_empty(&pending_free) || kthread_should_stop());
> +
> +             info->waiting_reqs = 0;
> +             smp_mb();
> +
> +             if (do_comback_cmd_fn(info))
> +                     info->waiting_reqs = 1;
> +     }
> +
> +     info->kthread = NULL;
> +     scsiback_put(info);
> +
> +     return 0;
> +}
> diff -r 77e3c0d472ac -r 81e347dc1bfe drivers/xen/scsiback/comback.h
> --- /dev/null Thu Jan 01 00:00:00 1970 +0000
> +++ b/drivers/xen/scsiback/comback.h  Fri Feb 15 19:49:24 2008 +0900
...

> +struct comback_info {
> +     struct xenbus_device *dev;
> +     struct scsi_device *sdev;
> +
> +     domid_t domid;
> +     unsigned int evtchn;
> +     unsigned int irq;
> +
> +     struct vscsiif_back_ring  ring;
> +     struct vm_struct *ring_area;
> +
> +     grant_handle_t shmem_handle;
> +     grant_ref_t shmem_ref;
> +
> +     struct work_struct scsiback_work;
> +
> +     spinlock_t ring_lock;
> +     atomic_t refcnt;
> +
> +     struct task_struct *kthread;
> +     wait_queue_head_t waiting_to_free;
> +     wait_queue_head_t wq;
> +     unsigned int waiting_reqs;
> +     struct page **mmap_pages;
> +
> +};
> +
> +typedef struct {
> +     unsigned char cmd;
> +     struct comback_info *info;
> +     uint8_t data_dir;
> +     uint16_t rqid;
> +     uint8_t use_sg;
> +     uint32_t request_bufflen;
> +     atomic_t pendcnt;
Is this ever anything other than 0 or 1?

> +     struct request *rq;
> +     struct scsiback_request_segment{
> +             grant_ref_t gref;
> +             uint16_t offset;
> +             uint16_t length;
> +     } pend_seg[VSCSIIF_SG_TABLESIZE];
> +     struct list_head free_list;
> +} pending_req_t;
Would it not be easier just to embed a struct vscsiif_request in there?
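Something like this (sketch, with the field list trimmed):

	typedef struct {
		struct vscsiif_request ring_req; /* private copy of the ring slot */
		struct comback_info *info;
		struct request *rq;
		atomic_t pendcnt;
		struct list_head free_list;
	} pending_req_t;

Then copy_request_ring_info() becomes mostly a single structure copy, and you stop duplicating the cmd/data_dir/rqid/use_sg/segment fields by hand.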

> +typedef struct scsi_pending_req              scsi_pending_req_t;
> +
> +irqreturn_t scsiback_intr(int, void *, struct pt_regs *);
> +int scsiback_init_sring(struct comback_info *,
> +             unsigned long, unsigned int);
> +int scsiback_schedule(void *data);
> +
> +
> +#define scsiback_get(_b) (atomic_inc(&(_b)->refcnt))
> +#define scsiback_put(_b)                             \
> +     do {                                            \
> +             if (atomic_dec_and_test(&(_b)->refcnt)) \
> +                     wake_up(&(_b)->waiting_to_free);\
> +     } while (0)
> +
> +struct comback_info *scsiinfo_alloc(domid_t domid);
> +void scsiback_free(struct comback_info *info);
> +void scsiback_disconnect(struct comback_info *info);
> +void __init scsiback_interface_init(void);
> +void __exit scsiback_interface_exit(void);
> +int scsiif_xenbus_init(void);
> +void scsiif_xenbus_unregister(void);
> +
> +
> +#endif /* __SCSIIF__BACKEND__COMMON_H__ */
> diff -r 77e3c0d472ac -r 81e347dc1bfe drivers/xen/scsiback/interface.c
> --- /dev/null Thu Jan 01 00:00:00 1970 +0000
> +++ b/drivers/xen/scsiback/interface.c        Fri Feb 15 19:49:24 2008 +0900
...

> +struct comback_info *scsiinfo_alloc(domid_t domid)
> +{
> +     struct comback_info *info;
> +
> +     info = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL);
> +     if (!info)
> +             return ERR_PTR(-ENOMEM);
> +
> +     memset(info, 0, sizeof(*info));
> +     info->domid = domid;
> +     spin_lock_init(&info->ring_lock);
> +     atomic_set(&info->refcnt, 1);
> +     init_waitqueue_head(&info->wq);
> +     init_waitqueue_head(&info->waiting_to_free);
> +
> +     return info;
> +}
> +
> +static int map_frontend_page(struct comback_info *info, unsigned long ring_ref)
> +{
> +     struct gnttab_map_grant_ref op;
> +     int err;
> +
> +     gnttab_set_map_op(&op, (unsigned long)info->ring_area->addr,
> +                             GNTMAP_host_map, ring_ref,
> +                             info->domid);
> +
> +     err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
> +     BUG_ON(err);
> +
> +     if (op.status) {
> +             printk(KERN_ERR "scsiback: Grant table operation failure !\n");
> +             return op.status;
> +     }
> +
> +     info->shmem_ref    = ring_ref;
> +     info->shmem_handle = op.handle;
> +
> +     return 0;
> +}
> +
> +static void unmap_frontend_page(struct comback_info *info)
> +{
> +     struct gnttab_unmap_grant_ref op;
> +     int err;
> +
> +     gnttab_set_unmap_op(&op, (unsigned long)info->ring_area->addr,
> +                             GNTMAP_host_map, info->shmem_handle);
> +
> +     err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
> +     BUG_ON(err);
> +
> +}
> +
> +int scsiback_init_sring(struct comback_info *info,
> +     unsigned long ring_ref, unsigned int evtchn)
> +{
> +     struct vscsiif_sring *sring;
> +     int err;
> +
> +     if (info->irq) {
> +             printk(KERN_ERR "scsiback: Already connected through?\n");
> +             return 0;
> +     }
> +
> +     info->ring_area = alloc_vm_area(PAGE_SIZE);
> +     if (!info)
> +             return -ENOMEM;
> +
> +     err = map_frontend_page(info, ring_ref);
> +     if (err)
> +             goto free_vm;
> +
> +     sring = (struct vscsiif_sring *) info->ring_area->addr;
> +     BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
> +
> +     err = bind_interdomain_evtchn_to_irqhandler(
> +                     info->domid, evtchn,
> +                     comback_intr, 0, "vscsiif-backend", info);
> +
> +     if (err < 0)
> +             goto unmap_page;
> +             
> +     info->irq = err;
> +
> +     return 0;
> +
> +unmap_page:
> +     unmap_frontend_page(info);
> +free_vm:
> +     free_vm_area(info->ring_area);
> +     return err;
> +}
> +
> +void scsiback_disconnect(struct comback_info *info)
> +{
> +     if (info->kthread) {
> +             kthread_stop(info->kthread);
> +             info->kthread = NULL;
> +     }
> +
> +     atomic_dec(&info->refcnt);
> +     wait_event(info->waiting_to_free, atomic_read(&info->refcnt) == 0);
> +     atomic_inc(&info->refcnt);
That looks a bit odd.  Would you mind explaining your reference
counting rules, please?


> +
> +     if (info->irq) {
> +             unbind_from_irqhandler(info->irq, info);
> +             info->irq = 0;
> +     }
> +
> +     if (info->ring.sring) {
> +             unmap_frontend_page(info);
> +             free_vm_area(info->ring_area);
> +             info->ring.sring = NULL;
> +     }
> +}
> +
> +void scsiback_free(struct comback_info *info)
> +{
> +     if (!atomic_dec_and_test(&info->refcnt))
> +             BUG();
> +     kmem_cache_free(scsiback_cachep, info);
> +}
> +
> +void __init scsiback_interface_init(void)
> +{
> +     scsiback_cachep = kmem_cache_create("vscsiif_cache",
> +             sizeof(struct comback_info), 0, 0, NULL, NULL);
What happens if this fails?
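I'd expect it to fail module load, something like (sketch; means changing the return type and checking it from the module init path):

	int __init scsiback_interface_init(void)
	{
		scsiback_cachep = kmem_cache_create("vscsiif_cache",
			sizeof(struct comback_info), 0, 0, NULL, NULL);
		if (!scsiback_cachep) {
			printk(KERN_ERR "scsiback: can't allocate comback_info cache\n");
			return -ENOMEM;
		}
		return 0;
	}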

> +}
> +
> +void __exit scsiback_interface_exit(void)
> +{
> +     kmem_cache_destroy(scsiback_cachep);
> +}
> diff -r 77e3c0d472ac -r 81e347dc1bfe drivers/xen/scsiback/scsiback.c
> --- /dev/null Thu Jan 01 00:00:00 1970 +0000
> +++ b/drivers/xen/scsiback/scsiback.c Fri Feb 15 19:49:24 2008 +0900
...
> +#include "comback.h"
> +
> +extern void free_req(pending_req_t *req);
> +
> +int vscsiif_reqs = VSCSIIF_DEFAULT_CAN_QUEUE;
> +module_param_named(reqs, vscsiif_reqs, int, 0);
> +MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");
> +
> +
> +#define INVALID_GRANT_HANDLE 0xFFFF
> +#define SCSIBACK_INVALID_HANDLE (~0)
> +#define VSCSIIF_TIMEOUT              (5*HZ)
> +
> +static pending_req_t *pending_reqs;
> +struct list_head pending_free;
> +static struct page **pending_pages;
> +static grant_handle_t *pending_grant_handles;
> +
> +static inline int vaddr_pagenr(pending_req_t *req, int seg)
> +{
> +     return (req - pending_reqs) * VSCSIIF_SG_TABLESIZE + seg;
> +}
Okay, so pending_pages is a big array with VSCSIIF_SG_TABLESIZE slots
per request?

> +
> +static inline unsigned long vaddr(pending_req_t *req, int seg)
> +{
> +     unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
> +     return (unsigned long)pfn_to_kaddr(pfn);
> +}
Would it make more sense for this to return a pointer, rather than an
unsigned long, given that it's a virtual address?

Also, inline in .c files is usually a mistake.
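I.e. something like (untested, and dropping the inline at the same time):

	static void *vaddr(pending_req_t *req, int seg)
	{
		unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
		return pfn_to_kaddr(pfn);
	}

with a cast at the few callers (gnttab_set_map_op() and friends) which genuinely want an unsigned long.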

> +
> +#define pending_handle(_req, _seg) \
> +     (pending_grant_handles[vaddr_pagenr(_req, _seg)])
> +
> +
> +static void fast_flush_area(pending_req_t *req)
> +{
> +     struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
> +     unsigned int i, invcount = 0;
> +     grant_handle_t handle;
> +     int err;
> +
> +     if (req->use_sg) {
> +             for (i = 0; i < req->use_sg; i++) {
> +                     handle = pending_handle(req, i);
> +                     if (handle == SCSIBACK_INVALID_HANDLE)
> +                             continue;
> +                     gnttab_set_unmap_op(&unmap[i], vaddr(req, i),
> +                                             GNTMAP_host_map, handle);
> +                     pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
> +                     invcount++;
> +             }
> +
> +             err = HYPERVISOR_grant_table_op(
> +                     GNTTABOP_unmap_grant_ref, unmap, invcount);
> +             BUG_ON(err);
> +     } else if (req->request_bufflen) {
> +             handle = pending_handle(req, 0);
> +             if (handle == SCSIBACK_INVALID_HANDLE)
> +                     return;
> +             gnttab_set_unmap_op(&unmap[0], vaddr(req, 0),
> +                                     GNTMAP_host_map, handle);
> +             pending_handle(req, 0) = SCSIBACK_INVALID_HANDLE;
> +
> +             err = HYPERVISOR_grant_table_op(
> +                     GNTTABOP_unmap_grant_ref, unmap, 1);
> +             BUG_ON(err);
> +     }
> +     
> +     return;
> +}
> +
> +static void make_sense(struct comback_info *info, struct request *req,
> +                    int32_t result, uint16_t rqid)
This does a fair bit more than just making the sense data, so it
probably wants a more descriptive name.

> +{
> +     struct vscsiif_response *ring_res;
> +     int notify, more_to_do = 1;
> +     unsigned long flags;
> +
> +     DPRINTK("%s\n",__FUNCTION__);
> +
> +     spin_lock_irqsave(&info->ring_lock, flags);
> +
> +     ring_res = RING_GET_RESPONSE(&info->ring,
> +                                     info->ring.rsp_prod_pvt);
> +     info->ring.rsp_prod_pvt++;
> +
> +     memset(ring_res->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
> +
> +     ring_res->rslt   = result;
> +     ring_res->rqid   = rqid;
> +
> +     if (result != 0 && req != NULL ) {
> +             memcpy(ring_res->sense_buffer,
> +                    req->sense, req->sense_len);
> +             ring_res->sense_len = req->sense_len;
> +     } else
> +             ring_res->sense_len = 0;
> +
> +     RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);

> +     if (info->ring.rsp_prod_pvt == info->ring.req_cons) {
> +             RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
> +     } else if (RING_HAS_UNCONSUMED_REQUESTS(&info->ring)) {
> +             more_to_do = 1;
> +     }
Why do you need to check for more requests from here?  The frontend
should be giving you a kick over the event channel when it queues
requests (assuming you've set up req_event correctly and so forth).
That would make this check redundant.
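With req_event maintained properly, the response path only needs something like (sketch):

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
	spin_unlock_irqrestore(&info->ring_lock, flags);
	if (notify)
		notify_remote_via_irq(info->irq);

and the consumer loop's RING_FINAL_CHECK_FOR_REQUESTS takes care of spotting late-arriving requests.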

> +
> +     spin_unlock_irqrestore(&info->ring_lock, flags);
> +
> +     if (more_to_do) {
> +             info->waiting_reqs = 1;
> +             wake_up(&info->wq);
> +     }
> +     if (notify)
> +             notify_remote_via_irq(info->irq);
> +}
> +
> +static void scsiback_end_cmd_fn(struct request *req, int error)
> +{
> +     unsigned char sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
> +     pending_req_t *pending_req = req->end_io_data;
> +     struct comback_info *info = pending_req->info;
> +     pending_req->rq = req;
> +
> +     DPRINTK("%s\n",__FUNCTION__);
> +
> +     if ((req->errors != 0) && (req->cmd[0] != TEST_UNIT_READY)) {
> +
> +             printk(KERN_ERR "scsiback: %d:%d:%d:%d  
> ",info->sdev->host->host_no,
> +                             info->sdev->channel, info->sdev->id, 
> +                             info->sdev->lun);
> +             printk(KERN_ERR "status = 0x%02x, message = 0x%02x, host = 
> 0x%02x, driver = 0x%02x\n",
> +                             status_byte(req->errors), msg_byte(req->errors),
> +                             host_byte(req->errors), 
> driver_byte(req->errors));
> +             memcpy(sense_buffer, req->sense, req->sense_len);
> +
> +             printk(KERN_ERR "scsiback: cmnd[0]=0x%02X use_sg=%d 
> req_buflen=%d\n",
> +                             req->cmd[0],
> +                             pending_req->use_sg, 
> +                             pending_req->request_bufflen);
> +
> +             __scsi_print_sense("scsiback", sense_buffer, req->sense_len);
> +     }
> +
> +     if (atomic_dec_and_test(&pending_req->pendcnt)) {
> +             fast_flush_area(pending_req);
> +
> +             make_sense(pending_req->info, pending_req->rq,
> +                        req->errors, pending_req->rqid);
> +
> +             scsiback_put(pending_req->info);
> +             free_req(pending_req);
> +     }
> +
> +     __blk_put_request(req->q, req);
> +
> +}
> +
> +
> +/* quoted scsi_lib.c/scsi_merge_bio */
> +static int scsiback_merge_bio(struct request *rq, struct bio *bio)
Umm... Why do you need your own merge_bio function?  That sounds like
something best handled by Linux's generic block and scsi subsystems,
rather than doing it in the backend.


> +{
> +     struct request_queue *q = rq->q;
> +
> +     bio->bi_flags &= ~(1 << BIO_SEG_VALID);
> +     if (rq_data_dir(rq) == WRITE)
> +             bio->bi_rw |= (1 << BIO_RW);
> +
> +     blk_queue_bounce(q, &bio);
> +
> +     if (!rq->bio)
> +             blk_rq_bio_prep(q, rq, bio);
> +     else if (!q->back_merge_fn(q, rq, bio))
> +             return -EINVAL;
> +     else {
> +             rq->biotail->bi_next = bio;
> +             rq->biotail          = bio;
> +             rq->hard_nr_sectors += bio_sectors(bio);
> +             rq->nr_sectors       = rq->hard_nr_sectors;
> +     }
> +
> +     return 0;
> +}
> +
> +
> +/* quoted scsi_lib.c/scsi_bi_endio */
> +static int scsiback_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
Again, why do you need this?

> +{
> +     if (bio->bi_size)
> +             return 1;
> +
> +     bio_put(bio);
> +     return 0;
> +}
> +
> +
> +/* quoted scsi_lib.c/scsi_req_map_sg . */
> +static int requset_map_sg(pending_req_t *pending_req, unsigned int count)
Same question again: why does the backend need its own copy of this?

Also, the function name is misspelt ("requset" for "request").

> +{
> +     struct request *rq = pending_req->rq;
> +     struct request_queue *q = pending_req->rq->q;
> +     int nr_pages;
> +     unsigned int nsegs = count;
> +
> +     unsigned int data_len = 0, len, bytes, off;
> +     struct page *page;
> +     struct bio *bio = NULL;
> +     int i, err, nr_vecs = 0;
> +
> +     for (i = 0; i < nsegs; i++) {
> +             page = virt_to_page(vaddr(pending_req, i));
> +             off = (unsigned int)pending_req->pend_seg[i].offset;
> +             len = (unsigned int)pending_req->pend_seg[i].length;
> +             data_len += len;
> +
> +             nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
> +             while (len > 0) {
> +                     bytes = min_t(unsigned int, len, PAGE_SIZE - off);
> +
> +                     if (!bio) {
> +                             nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
> +                             nr_pages -= nr_vecs;
> +                             bio = bio_alloc(GFP_KERNEL, nr_vecs);
> +                             if (!bio) {
> +                                     err = -ENOMEM;
> +                                     goto free_bios;
> +                             }
> +                             bio->bi_end_io = scsiback_bi_endio;
> +                     }
> +
> +                     if (bio_add_pc_page(q, bio, page, bytes, off) !=
> +                                             bytes) {
> +                             bio_put(bio);
> +                             err = -EINVAL;
> +                             goto free_bios;
> +                     }
> +
> +                     if (bio->bi_vcnt >= nr_vecs) {
> +                             err = scsiback_merge_bio(rq, bio);
> +                             if (err) {
> +                                     bio_endio(bio, bio->bi_size, 0);
> +                                     goto free_bios;
> +                             }
> +                             bio = NULL;
> +                     }
> +
> +                     page++;
> +                     len -= bytes;
> +                     off = 0;
> +             }
> +     }
> +
> +     rq->buffer   = rq->data = NULL;
> +     rq->data_len = data_len;
> +
> +     return 0;
> +
> +free_bios:
> +     while ((bio = rq->bio) != NULL) {
> +             rq->bio = bio->bi_next;
> +             /*
> +              * call endio instead of bio_put incase it was bounced
> +              */
> +             bio_endio(bio, bio->bi_size, 0);
> +     }
> +
> +     return err;
> +}
> +
> +int copy_request_ring_info(struct comback_info *info,
> +             struct vscsiif_request *ring_req, pending_req_t *pending_req)
> +{
> +     int write;
> +     char sense[VSCSIIF_SENSE_BUFFERSIZE];
> +     int i;
> +
> +     DPRINTK("%s\n",__FUNCTION__);
> +
> +     pending_req->rqid   = ring_req->rqid;
> +     pending_req->cmd    = ring_req->cmd;
> +
> +     if (ring_req->channel != info->sdev->channel ||
> +             ring_req->id != info->sdev->id ||
> +             ring_req->lun != info->sdev->lun) {
> +             printk(KERN_ERR "scsiback: Device different %d:%d:%d\n",
> +                     ring_req->channel, ring_req->id, ring_req->lun);
> +             BUG();
Umm.  Yeah, letting the frontend cause a BUG() in the backend isn't
really a good idea.

Also, if you're enforcing that the per-request channel, ID, and LUN
always match the per-ring ones, there's not much point in having them
in the request structure.
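Failing the request would be friendlier, something like (sketch; reuses the DID_NO_CONNECT convention from the failure path in scsiback_cmd_exec(), and implies the caller starts checking copy_request_ring_info()'s return value):

	if (ring_req->channel != info->sdev->channel ||
	    ring_req->id != info->sdev->id ||
	    ring_req->lun != info->sdev->lun) {
		printk(KERN_ERR "scsiback: device mismatch %d:%d:%d\n",
			ring_req->channel, ring_req->id, ring_req->lun);
		make_sense(info, NULL, (DID_NO_CONNECT << 16), pending_req->rqid);
		free_req(pending_req);
		return -ENODEV;
	}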

> +     }
> +
> +     write = (ring_req->sc_data_direction == DMA_TO_DEVICE);
> +     pending_req->data_dir = ring_req->sc_data_direction;
> +     pending_req->rq       =
> +                     blk_get_request(info->sdev->request_queue,
> +                                                     write, GFP_KERNEL);
> +     pending_req->info   = info;
> +     pending_req->use_sg = ring_req->use_sg;
> +     pending_req->request_bufflen = ring_req->request_bufflen;
> +
> +
> +     pending_req->rq->flags  |= REQ_BLOCK_PC;
> +     pending_req->rq->cmd_len = (unsigned int)ring_req->cmd_len;
> +     memcpy(pending_req->rq->cmd, ring_req->cmnd,
> +                ring_req->cmd_len);
You probably want to be checking that cmd_len <= 16 (or whatever your
maximum CDB size is) here, or bad things are going to happen.  Also,
remember that the frontend can still modify ring_req underneath your
feet.
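Sketch of what I mean, assuming the interface header grows a VSCSIIF_MAX_COMMAND_SIZE bound (name is illustrative):

	unsigned int cmd_len = ring_req->cmd_len;

	barrier();	/* use the snapshot; don't re-read the shared ring */
	if (cmd_len > VSCSIIF_MAX_COMMAND_SIZE)
		return -EINVAL;	/* and fail the request cleanly */
	pending_req->rq->cmd_len = cmd_len;
	memcpy(pending_req->rq->cmd, ring_req->cmnd, cmd_len);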

> +
> +     memset(sense, 0, sizeof(sense));
> +     pending_req->rq->sense     = sense;
> +     pending_req->rq->sense_len = VSCSIIF_SENSE_BUFFERSIZE;
> +
> +     pending_req->rq->retries   = 0;
> +     /*pending_req->rq->timeout   = (unsigned int)ring_req->timeout_per_command;*/
> +     pending_req->rq->timeout   = VSCSIIF_TIMEOUT;
> +
> +     pending_req->rq->end_io_data = pending_req;
> +
> +     if (ring_req->use_sg) {
> +             for (i = 0; i < ring_req->use_sg; i++) {
> +                     pending_req->pend_seg[i].gref   = ring_req->seg[i].gref;
> +                     pending_req->pend_seg[i].offset = ring_req->seg[i].offset;
> +                     pending_req->pend_seg[i].length = ring_req->seg[i].length;
> +             }
> +     } else if (ring_req->request_bufflen) {
> +             pending_req->pend_seg[0].gref   = ring_req->seg[0].gref;
> +             pending_req->pend_seg[0].offset = ring_req->seg[0].offset;
> +             pending_req->pend_seg[0].length = ring_req->seg[0].length;
> +     }
Could you not just require that use_sg is never 0 when request_bufflen
is non-zero?


> +     
> +     return 0;
> +}
> +
> +
> +void scsiback_cmd_exec(pending_req_t *pending_req)
> +{
> +
> +     struct gnttab_map_grant_ref map[VSCSIIF_SG_TABLESIZE];
> +     struct comback_info *info = pending_req->info;
> +     
> +     int write = (pending_req->data_dir == DMA_TO_DEVICE);
> +     u32 flags;
> +     int i, err = 0;
> +     unsigned int use_sg = (unsigned int)pending_req->use_sg;
> +
> +     DPRINTK("%s\n",__FUNCTION__);
> +
> +     if (!info->sdev) {
> +             goto fail_response;
> +     }
> +
> +     if (use_sg) {
> +
> +             for (i = 0; i < use_sg; i++) {
> +                     flags = GNTMAP_host_map;
> +                     if (write)
> +                             flags |= GNTMAP_readonly;
> +                     gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
> +                                             pending_req->pend_seg[i].gref,
> +                                             info->domid);
> +             }
> +
> +             err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, use_sg);
> +             BUG_ON(err);
> +
> +             for (i = 0; i < use_sg; i++) {
> +                     if (unlikely(map[i].status != 0)) {
> +                             printk(KERN_ERR "scsiback: invalid buffer -- 
> could not remap it\n");
> +                             map[i].handle = SCSIBACK_INVALID_HANDLE;
> +                             err |= 1;
> +                     }
> +
> +                     pending_handle(pending_req, i) = map[i].handle;
> +
> +                     if (err)
> +                             continue;
> +
> +                     set_phys_to_machine(__pa(vaddr(
> +                             pending_req, i)) >> PAGE_SHIFT,
> +                             FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
> +             }
> +
> +             if (err)
> +                     goto fail_flush;
> +
> +             if (requset_map_sg(pending_req, use_sg)) {
> +                     printk(KERN_ERR "scsiback: SG Request Map Error\n");
> +                     goto fail_map;
> +             }
I think you might find it easier to just use scsi_execute_async()
here.  Sure, it'll mean building an SG list for the request, but it
looks like it'll be far less work than going behind the scsi layer's
back and reimplementing everything.

> +
> +     } else if (pending_req->request_bufflen) {
> +
> +             flags = GNTMAP_host_map;
> +             if (write)
> +                     flags |= GNTMAP_readonly;
> +             gnttab_set_map_op(&map[0], vaddr(pending_req, 0), flags,
> +                                     pending_req->pend_seg[0].gref,
> +                                     info->domid);
> +
> +             err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, 1);
> +             BUG_ON(err);
> +
> +             if (unlikely(map[0].status != 0)) {
> +                     printk(KERN_ERR "scsiback: invalid buffer single -- 
> could not remap it\n");
> +                     map[0].handle = SCSIBACK_INVALID_HANDLE;
> +                     err |= 1;
> +             }
> +
> +             pending_handle(pending_req, 0) = map[0].handle;
> +
> +             set_phys_to_machine(__pa(vaddr(
> +                     pending_req, 0)) >> PAGE_SHIFT,
> +                     FOREIGN_FRAME(map[0].dev_bus_addr >> PAGE_SHIFT));
> +
> +             if (err)
> +                     goto fail_flush;
> +
> +             if (requset_map_sg(pending_req, 1)) {
> +                     printk(KERN_ERR "scsiback: SG Request Map Error\n");
> +                     goto fail_map;
> +             }
> +     }
> +
> +     atomic_set(&pending_req->pendcnt, 1);
> +     scsiback_get(info);
> +
> +     blk_execute_rq_nowait(pending_req->rq->q, NULL,
> +                                               pending_req->rq, 1, scsiback_end_cmd_fn);
> +
> +     return ;
> +
> +fail_map:
> +fail_flush:
> +     fast_flush_area(pending_req);
> +
> +fail_response:
> +     make_sense(info, NULL, (DID_NO_CONNECT << 16),
> +                pending_req->rqid);
> +     free_req(pending_req);
> +
> +}
> +
> +
> +void scsiback_reset_exec(pending_req_t *pending_req)
> +{
> +     /* not implemented */
> +     BUG();
Yay!  Another way for the frontend to kill the backend.

> +}
> +
> +
