
[Xen-devel] [RFC PATCH v3 09/18] xen/arm: ITS: Add virtual ITS commands support



From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>

Add virtual ITS command processing support to the virtual ITS driver.
Guest commands are read from the guest command queue one 32-byte block
at a time and emulated: MAPD updates the domain's virtual device table,
MAPC updates the virtual collection mapping, MAPVI/MAPI and MOVI update
virtual ITT entries, DISCARD invalidates them, and INT injects the
virtual LPI into the vCPU of the target collection. INV, INVALL, CLEAR
and SYNC are accepted but ignored for now, and MOVALL is not yet
implemented.
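
As background: the guest writes 32-byte command blocks into its command
queue and advances GITS_CWRITER, and the emulation walks the queue and
decodes each block. Purely as an illustration (not part of this patch;
encode_mapd() is a hypothetical helper), a guest could encode a MAPD
command as below, following the GICv3 ITS command layout that
vgic_its_add_device() decodes:

  /* Illustrative sketch only: build a MAPD command the way a guest
   * driver would, per the GICv3 ITS command layout. */
  static void encode_mapd(uint64_t cmd[4], uint32_t devid,
                          uint64_t itt_addr, uint8_t size, bool valid)
  {
      cmd[0] = 0x08 | ((uint64_t)devid << 32);      /* MAPD opcode, DeviceID */
      cmd[1] = size & 0x1f;                         /* ITT covers 2^(size + 1) events */
      cmd[2] = (itt_addr & 0x000fffffffffff00UL) |  /* ITT address bits [51:8] */
               ((uint64_t)valid << 63);             /* Valid flag */
      cmd[3] = 0;
  }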

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
---
 xen/arch/arm/gic-v3-its.c  |    7 +
 xen/arch/arm/vgic-v3-its.c |  393 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 400 insertions(+)

diff --git a/xen/arch/arm/gic-v3-its.c b/xen/arch/arm/gic-v3-its.c
index 535fc53..2a4fa97 100644
--- a/xen/arch/arm/gic-v3-its.c
+++ b/xen/arch/arm/gic-v3-its.c
@@ -89,6 +89,7 @@ struct its_node {
 
 #define ITS_ITT_ALIGN    SZ_256
 
+static u32 id_bits;
 static LIST_HEAD(its_nodes);
 static DEFINE_SPINLOCK(its_lock);
 static struct rdist_prop  *gic_rdists;
@@ -146,6 +147,11 @@ void dump_cmd(its_cmd_block *cmd)
 }
 #endif
 
+u32 its_get_nr_events(void)
+{
+    return 1U << id_bits;
+}
+
 /* RB-tree helpers for its_device */
 struct its_device * find_its_device(struct rb_root *root, u32 devid)
 {
@@ -1044,6 +1050,7 @@ static int its_probe(struct dt_device_node *node)
     its->phys_size = its_size;
     typer = readl_relaxed(its_base + GITS_TYPER);
     its->ite_size = ((typer >> 4) & 0xf) + 1;
+    id_bits = GITS_TYPER_IDBITS(typer);
 
     its->cmd_base = xzalloc_bytes(ITS_CMD_QUEUE_SZ);
     if ( !its->cmd_base )
diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index ea52a87..0671434 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -256,6 +256,399 @@ int remove_vits_device(struct rb_root *root, struct vits_device *dev)
     return 0;
 }
 
+static int vgic_its_process_sync(struct vcpu *v, struct vgic_its *vits,
+                                 its_cmd_block *virt_cmd)
+{
+    /* XXX: Ignored */
+    DPRINTK("vITS:d%dv%d SYNC: ta 0x%x \n",
+             v->domain->domain_id, v->vcpu_id, virt_cmd->sync.ta);
+
+    return 0;
+}
+
+static int vgic_its_process_mapvi(struct vcpu *v, struct vgic_its *vits,
+                                  its_cmd_block *virt_cmd)
+{
+    struct vitt entry;
+    struct vits_device *vdev;
+    uint8_t vcol_id, cmd;
+    uint32_t vid, dev_id, event;
+
+    vcol_id = virt_cmd->mapvi.col;
+    vid = virt_cmd->mapvi.phy_id;
+    dev_id = its_decode_devid(v->domain, virt_cmd);
+    cmd = virt_cmd->mapvi.cmd;
+
+    DPRINTK("vITS:d%dv%d MAPVI: dev_id 0x%x vcol_id %d vid %d \n",
+             v->domain->domain_id, v->vcpu_id, dev_id, vcol_id, vid);
+
+    if ( vcol_id > (v->domain->max_vcpus + 1) || vid >= its_get_nr_events() )
+        return -EINVAL;
+
+    /* XXX: Enable validation later */
+    vdev = find_vits_device(&v->domain->arch.vits->dev_root, dev_id);
+    if ( !vdev || !vdev->its_dev )
+        return -EINVAL;
+
+    entry.valid = true;
+    entry.vcollection = vcol_id;
+    entry.vlpi = vid;
+
+    if ( cmd == GITS_CMD_MAPI )
+        vits_set_vitt_entry(v->domain, dev_id, vid, &entry);
+    else
+    {
+        event = virt_cmd->mapvi.event;
+        if ( event >= its_get_nr_events() )
+            return -EINVAL;
+
+        vits_set_vitt_entry(v->domain, dev_id, event, &entry);
+    }
+
+    return 0;
+}
+
+static int vgic_its_process_movi(struct vcpu *v, struct vgic_its *vits,
+                                 its_cmd_block *virt_cmd)
+{
+    struct vitt entry;
+    struct vits_device *vdev;
+    uint32_t dev_id, event;
+    uint8_t vcol_id;
+
+    dev_id = its_decode_devid(v->domain, virt_cmd);
+    vcol_id = virt_cmd->movi.col;
+    event = virt_cmd->movi.event;
+
+    DPRINTK("vITS:d%dv%d MOVI: dev_id 0x%x vcol_id %d event %d\n",
+            v->domain->domain_id, v->vcpu_id, dev_id, vcol_id, event);
+    if ( vcol_id > (v->domain->max_vcpus + 1) || event >= its_get_nr_events() )
+        return -EINVAL;
+
+    /* Enable validation later when device assignment is done */
+    vdev = find_vits_device(&v->domain->arch.vits->dev_root, dev_id);
+    if ( !vdev || !vdev->its_dev )
+        return -EINVAL;
+
+    if ( vits_get_vitt_entry(v->domain, dev_id, event, &entry) )
+        return -EINVAL;
+    entry.vcollection = vcol_id;
+    if ( vits_set_vitt_entry(v->domain, dev_id, event, &entry) )
+        return -EINVAL;
+
+    return 0;
+}
+
+static int vgic_its_process_discard(struct vcpu *v, struct vgic_its *vits,
+                                    its_cmd_block *virt_cmd)
+{
+    struct vits_device *vdev;
+    struct vitt entry;
+    uint32_t event, dev_id;
+
+    event = virt_cmd->discard.event;
+    dev_id = its_decode_devid(v->domain, virt_cmd);
+
+    DPRINTK("vITS:d%dv%d DISCARD: dev_id 0x%x id %d\n",
+            v->domain->domain_id, v->vcpu_id, dev_id, event);
+    if ( event >= its_get_nr_events() )
+        return -EINVAL;
+
+    /* Validated later */
+    vdev = find_vits_device(&v->domain->arch.vits->dev_root, dev_id);
+    if ( !vdev || !vdev->its_dev )
+        return -EINVAL;
+
+    if ( vits_get_vitt_entry(v->domain, dev_id, event, &entry) )
+        return -EINVAL;
+    entry.valid = false;
+    if ( vits_set_vitt_entry(v->domain, dev_id, event, &entry) )
+        return -EINVAL;
+
+    return 0;
+}
+
+static int vgic_its_process_inv(struct vcpu *v, struct vgic_its *vits,
+                                its_cmd_block *virt_cmd)
+{
+    /* XXX: Ignored */
+    DPRINTK("vITS:d%dv%d INV: dev_id 0x%x id %d\n",
+            v->domain->domain_id, v->vcpu_id, virt_cmd->inv.devid,
+            virt_cmd->inv.event);
+
+    return 0;
+}
+
+static int vgic_its_process_clear(struct vcpu *v, struct vgic_its *vits,
+                                  its_cmd_block *virt_cmd)
+{
+    /* XXX: Ignored */
+    DPRINTK("vITS:d%dv%d CLEAR: dev_id 0x%x id %d\n",
+             v->domain->domain_id, v->vcpu_id, virt_cmd->clear.devid,
+             virt_cmd->clear.event);
+
+    return 0;
+}
+
+static int vgic_its_process_invall(struct vcpu *v, struct vgic_its *vits,
+                                   its_cmd_block *virt_cmd)
+{
+    /* XXX: Ignored */
+    DPRINTK("vITS:d%dv%d INVALL: vCID %d\n",
+            v->domain->domain_id, v->vcpu_id, virt_cmd->invall.col);
+
+    return 0;
+}
+
+static int vgic_its_process_int(struct vcpu *v, struct vgic_its *vits,
+                                its_cmd_block *virt_cmd)
+{
+    struct vitt vitt_entry;
+    uint32_t event, dev_id, col_id;
+
+    event = virt_cmd->int_cmd.cmd;
+    dev_id = its_decode_devid(v->domain, virt_cmd);
+
+    DPRINTK("vITS:d%dv%d INT: Device 0x%x id %d\n",
+            v->domain->domain_id, v->vcpu_id, dev_id, event);
+    if ( event >= its_get_nr_events() )
+        return -EINVAL;
+
+    if ( vits_get_vitt_entry(v->domain, dev_id, event, &vitt_entry) )
+        return -EINVAL;
+
+    if ( !vitt_entry.valid )
+    {
+        dprintk(XENLOG_G_ERR,
+                "vITS:d%dv%d INT CMD invalid, event %d for dev 0x%x\n",
+                v->domain->domain_id, v->vcpu_id, event, dev_id);
+        return -EINVAL;
+    }
+
+    col_id = vitt_entry.vcollection;
+    if ( col_id >= v->domain->max_vcpus )
+    {
+        dprintk(XENLOG_G_ERR,
+                "vITS:d%dv%d INT CMD invalid vCID %d for dev 0x%x\n",
+                v->domain->domain_id, v->vcpu_id, col_id, dev_id);
+        return -EINVAL;
+    }
+
+    vgic_vcpu_inject_irq(v->domain->vcpu[col_id], vitt_entry.vlpi);
+
+    return 0;
+}
+
+static int vgic_its_add_device(struct vcpu *v, struct vgic_its *vits,
+                               its_cmd_block *virt_cmd)
+{
+    struct domain *d = v->domain;
+    struct vdevice_table dt_entry;
+    uint32_t devid = its_decode_devid(v->domain, virt_cmd);
+
+    DPRINTK("vITS:d%dv%dd Add device dev_id 0x%x vitt_ipa = 0x%lx size %d\n",
+            v->domain->domain_id, v->vcpu_id, devid, (u64)virt_cmd->mapd.itt 
<< 8,
+            virt_cmd->mapd.size);
+    /* XXX: Validate devid with physical ITS driver ? */
+    if ( virt_cmd->mapd.valid )
+    {
+        /* itt field is 40 bit. extract 48 bit address by shifting */
+        dt_entry.vitt_ipa = virt_cmd->mapd.itt << 8;
+
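+        /* The vITT holds 2^(size + 1) entries of 8 bytes each */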
+        dt_entry.vitt_size = (1 << (virt_cmd->mapd.size + 1)) * 8;
+    }
+    else
+    {
+        dt_entry.vitt_ipa = INVALID_PADDR;
+        dt_entry.vitt_size = 0;
+    }
+
+    if ( vits_set_vdevice_entry(d, devid, &dt_entry) )
+        return -EINVAL;
+
+    return 0;
+}
+
+static int vgic_its_process_mapc(struct vcpu *v, struct vgic_its *vits,
+                                 its_cmd_block *virt_cmd)
+{
+    uint8_t vcol_id;
+    uint64_t vta = 0;
+
+    vcol_id = virt_cmd->mapc.col;
+    vta = virt_cmd->mapc.ta;
+
+    DPRINTK("vITS:d%dv%d MAPC: vCID %d vTA 0x%lx valid %d \n",
+            v->domain->domain_id, v->vcpu_id, vcol_id, vta,
+            virt_cmd->mapc.valid);
+    if ( vcol_id > (v->domain->max_vcpus + 1) || vta > v->domain->max_vcpus )
+        return -EINVAL;
+
+    if ( virt_cmd->mapc.valid )
+        v->domain->arch.vits->collections[vcol_id].target_address = vta;
+    else
+        v->domain->arch.vits->collections[vcol_id].target_address = ~0UL;
+
+    return 0;
+}
+
+static void vgic_its_update_read_ptr(struct vcpu *v, struct vgic_its *vits)
+{
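+    /* All queued commands have been consumed; advance the command
+     * queue read pointer accordingly. */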
+    vits->cmd_read = vits->cmd_write;
+}
+
+#ifdef DEBUG_ITS
+static const char *cmd_str[] = {
+    [GITS_CMD_MOVI]    = "MOVI",
+    [GITS_CMD_INT]     = "INT",
+    [GITS_CMD_CLEAR]   = "CLEAR",
+    [GITS_CMD_SYNC]    = "SYNC",
+    [GITS_CMD_MAPD]    = "MAPD",
+    [GITS_CMD_MAPC]    = "MAPC",
+    [GITS_CMD_MAPVI]   = "MAPVI",
+    [GITS_CMD_MAPI]    = "MAPI",
+    [GITS_CMD_INV]     = "INV",
+    [GITS_CMD_INVALL]  = "INVALL",
+    [GITS_CMD_MOVALL]  = "MOVALL",
+    [GITS_CMD_DISCARD] = "DISCARD",
+};
+#endif
+
+static int vgic_its_parse_its_command(struct vcpu *v, struct vgic_its *vits,
+                                      its_cmd_block *virt_cmd)
+{
+    uint8_t cmd = its_decode_cmd(virt_cmd);
+    int ret;
+
+#ifdef DEBUG_ITS
+    DPRINTK("vITS:d%dv%d Received cmd %s (0x%x)\n",
+            v->domain->domain_id, v->vcpu_id, cmd_str[cmd], cmd);
+    DPRINTK("Dump Virt cmd: ");
+    dump_cmd(virt_cmd);
+#endif
+
+    switch ( cmd )
+    {
+    case GITS_CMD_MAPD:
+        ret = vgic_its_add_device(v, vits, virt_cmd);
+        break;
+    case GITS_CMD_MAPC:
+        ret = vgic_its_process_mapc(v, vits, virt_cmd);
+        break;
+    case GITS_CMD_MAPI:
+        /* MAPI is same as MAPVI */
+    case GITS_CMD_MAPVI:
+        ret = vgic_its_process_mapvi(v, vits, virt_cmd);
+        break;
+    case GITS_CMD_MOVI:
+        ret = vgic_its_process_movi(v, vits, virt_cmd);
+        break;
+    case GITS_CMD_DISCARD:
+        ret = vgic_its_process_discard(v, vits, virt_cmd);
+        break;
+    case GITS_CMD_INV:
+        ret = vgic_its_process_inv(v, vits, virt_cmd);
+        break;
+    case GITS_CMD_INVALL:
+        ret = vgic_its_process_invall(v, vits, virt_cmd);
+        break;
+    case GITS_CMD_INT:
+        ret = vgic_its_process_int(v, vits, virt_cmd);
+        break;
+    case GITS_CMD_CLEAR:
+        ret = vgic_its_process_clear(v, vits, virt_cmd);
+        break;
+    case GITS_CMD_SYNC:
+        ret = vgic_its_process_sync(v, vits, virt_cmd);
+        break;
+        /*TODO:  GITS_CMD_MOVALL not implemented */
+    default:
+        dprintk(XENLOG_G_ERR, "vITS:d%dv%d Unhandled command %d\n",
+                v->domain->domain_id, v->vcpu_id, cmd);
+        return 1;
+    }
+
+    if ( ret )
+    {
+        dprintk(XENLOG_G_ERR, "vITS:d%dv%d Failed to handle cmd %d\n",
+                v->domain->domain_id, v->vcpu_id, cmd);
+        return 1;
+    }
+
+    return 0;
+}
+
+/* Called with its lock held */
+static int vgic_its_read_virt_cmd(struct vcpu *v,
+                                  struct vgic_its *vits,
+                                  its_cmd_block *virt_cmd)
+{
+    struct page_info * page;
+    uint64_t offset;
+    unsigned long maddr;
+    void *p;
+
+    /* CMD Q can be more than 1 page. Map only page that is required */
+    maddr = ((vits->cmd_base & 0xfffffffff000UL) +
+              vits->cmd_write_save) & PAGE_MASK;
+
+    DPRINTK("vITS:d%dv%d Mapping CMD Q maddr 0x%lx write_save 0x%lx \n",
+            v->domain->domain_id, v->vcpu_id, maddr, vits->cmd_write_save);
+
+    page = get_page_from_gfn(v->domain, paddr_to_pfn(maddr), NULL, P2M_ALLOC);
+    if ( page == NULL )
+    {
+        dprintk(XENLOG_G_ERR, "vITS:d%dv%d Failed to get command page\n",
+                v->domain->domain_id, v->vcpu_id);
+        return -EINVAL;
+    }
+
+    p = __map_domain_page(page);
+
+    /* Offset within the mapped 4K page to read */
+    offset = vits->cmd_write_save & 0xfff;
+
+    memcpy(virt_cmd, p + offset, sizeof(its_cmd_block));
+
+    /* No command queue is created by vits to check on Q full */
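+    /* Each ITS command block is 32 bytes, hence the 0x20 stride */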
+    vits->cmd_write_save += 0x20;
+    if ( vits->cmd_write_save == vits->cmd_qsize )
+    {
+        DPRINTK("vITS:d%dv%d Reset write_save 0x%lx qsize 0x%lx\n",
+                v->domain->domain_id, v->vcpu_id, vits->cmd_write_save,
+                vits->cmd_qsize);
+        vits->cmd_write_save = 0x0;
+    }
+
+    unmap_domain_page(p);
+    put_page(page);
+
+    return 0;
+}
+
+int vgic_its_process_cmd(struct vcpu *v, struct vgic_its *vits)
+{
+    its_cmd_block virt_cmd;
+
+    /* XXX: Currently we are processing one cmd at a time */
+    ASSERT(spin_is_locked(&vits->lock));
+
+    do {
+        if ( vgic_its_read_virt_cmd(v, vits, &virt_cmd) )
+            goto err;
+        if ( vgic_its_parse_its_command(v, vits, &virt_cmd) )
+            goto err;
+    } while ( vits->cmd_write != vits->cmd_write_save );
+
+    vits->cmd_write_save = vits->cmd_write;
+    DPRINTK("vITS:d%dv%d write_save 0x%lx write 0x%lx\n",
+            v->domain->domain_id, v->vcpu_id, vits->cmd_write_save,
+            vits->cmd_write);
+    /* XXX: Currently we are processing one cmd at a time */
+    vgic_its_update_read_ptr(v, vits);
+
+    dsb(ishst);
+
+    return 1;
+err:
+    dprintk(XENLOG_G_ERR, "vITS:d%dv%d Failed to process guest cmd\n",
+            v->domain->domain_id, v->vcpu_id);
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
-- 
1.7.9.5

