[Xen-devel] [PATCH 01 of 14 V3] amd iommu: Refactoring iommu ring buffer definition

# HG changeset patch
# User Wei Wang <wei.wang2@xxxxxxx>
# Date 1326213576 -3600
# Node ID 9c9ddf2dd700119fdaf8a420fb051c22279853cc
# Parent  5b2676ac13218951698c49fa0350f2ac48220f3d
amd iommu: Refactoring iommu ring buffer definition.
Introduce struct ring_buffer to represent iommu cmd buffer, event log and ppr log.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
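
For reference, the patch folds the separate cmd_buffer_tail / event_log_head fields into per-buffer head/tail indices kept inside the new struct ring_buffer, so command buffer, event log and (later) PPR log share one allocation and index-wrapping scheme. Below is a minimal, self-contained sketch of that idea, not the Xen code itself: plain calloc stands in for __alloc_amd_iommu_tables, the page-order rounding is omitted, ring_buffer_alloc/ring_buffer_queue are hypothetical helper names, and the full-ring check uses the in-memory head rather than the head register read from MMIO as queue_iommu_command() does.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Simplified stand-in for the struct ring_buffer added to amd-iommu.h. */
struct ring_buffer {
    void *buffer;
    unsigned long entries;
    unsigned long alloc_size;
    uint32_t tail;
    uint32_t head;
};

/* Illustrative entry type, mirroring cmd_entry_t (4 x u32 per entry). */
typedef struct { uint32_t data[4]; } cmd_entry_t;

/* Hypothetical helper: allocate a zeroed ring of fixed-size entries.
 * The real allocate_ring_buffer() rounds alloc_size up to a power-of-2
 * number of pages and recomputes 'entries' from that size. */
static int ring_buffer_alloc(struct ring_buffer *rb, uint32_t entry_size,
                             unsigned long entries)
{
    rb->head = 0;
    rb->tail = 0;
    rb->entries = entries;
    rb->alloc_size = entries * entry_size;
    rb->buffer = calloc(1, rb->alloc_size);
    return rb->buffer ? 0 : -1;
}

/* Queue one entry, wrapping the tail index; returns 1 on success, 0 if the
 * ring is full.  Mirrors the tail handling in queue_iommu_command(), except
 * that the head here is the cached field, not the value read from hardware. */
static int ring_buffer_queue(struct ring_buffer *rb, const cmd_entry_t *cmd)
{
    uint32_t next = rb->tail + 1;

    if ( next == rb->entries )
        next = 0;
    if ( next == rb->head )      /* ring full */
        return 0;

    memcpy((cmd_entry_t *)rb->buffer + rb->tail, cmd, sizeof(*cmd));
    rb->tail = next;
    return 1;
}

int main(void)
{
    struct ring_buffer cmds;
    cmd_entry_t e = { { 0x1, 0x2, 0x3, 0x4 } };

    if ( ring_buffer_alloc(&cmds, sizeof(cmd_entry_t), 256) )
        return 1;
    printf("queued: %d, tail now %u\n",
           ring_buffer_queue(&cmds, &e), cmds.tail);
    free(cmds.buffer);
    return 0;
}
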

diff -r 5b2676ac1321 -r 9c9ddf2dd700 xen/drivers/passthrough/amd/iommu_cmd.c
--- a/xen/drivers/passthrough/amd/iommu_cmd.c   Mon Jan 09 16:01:44 2012 +0100
+++ b/xen/drivers/passthrough/amd/iommu_cmd.c   Tue Jan 10 17:39:36 2012 +0100
@@ -29,7 +29,7 @@ static int queue_iommu_command(struct am
     u32 tail, head, *cmd_buffer;
     int i;
 
-    tail = iommu->cmd_buffer_tail;
+    tail = iommu->cmd_buffer.tail;
     if ( ++tail == iommu->cmd_buffer.entries )
         tail = 0;
 
@@ -40,13 +40,13 @@ static int queue_iommu_command(struct am
     if ( head != tail )
     {
         cmd_buffer = (u32 *)(iommu->cmd_buffer.buffer +
-                             (iommu->cmd_buffer_tail *
+                             (iommu->cmd_buffer.tail *
                              IOMMU_CMD_BUFFER_ENTRY_SIZE));
 
         for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; i++ )
             cmd_buffer[i] = cmd[i];
 
-        iommu->cmd_buffer_tail = tail;
+        iommu->cmd_buffer.tail = tail;
         return 1;
     }
 
@@ -57,7 +57,7 @@ static void commit_iommu_command_buffer(
 {
     u32 tail;
 
-    set_field_in_reg_u32(iommu->cmd_buffer_tail, 0,
+    set_field_in_reg_u32(iommu->cmd_buffer.tail, 0,
                          IOMMU_CMD_BUFFER_TAIL_MASK,
                          IOMMU_CMD_BUFFER_TAIL_SHIFT, &tail);
     writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET);
diff -r 5b2676ac1321 -r 9c9ddf2dd700 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c  Mon Jan 09 16:01:44 2012 +0100
+++ b/xen/drivers/passthrough/amd/iommu_init.c  Tue Jan 10 17:39:36 2012 +0100
@@ -294,20 +294,20 @@ static int amd_iommu_read_event_log(stru
                                   IOMMU_EVENT_LOG_TAIL_MASK,
                                   IOMMU_EVENT_LOG_TAIL_SHIFT);
 
-    while ( tail != iommu->event_log_head )
+    while ( tail != iommu->event_log.head )
     {
         /* read event log entry */
         event_log = (u32 *)(iommu->event_log.buffer +
-                           (iommu->event_log_head *
+                           (iommu->event_log.head *
                            IOMMU_EVENT_LOG_ENTRY_SIZE));
 
         parse_event_log_entry(iommu, event_log);
 
-        if ( ++iommu->event_log_head == iommu->event_log.entries )
-            iommu->event_log_head = 0;
+        if ( ++iommu->event_log.head == iommu->event_log.entries )
+            iommu->event_log.head = 0;
 
         /* update head pointer */
-        set_field_in_reg_u32(iommu->event_log_head, 0,
+        set_field_in_reg_u32(iommu->event_log.head, 0,
                              IOMMU_EVENT_LOG_HEAD_MASK,
                              IOMMU_EVENT_LOG_HEAD_SHIFT, &head);
         writel(head, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
@@ -346,7 +346,7 @@ static void amd_iommu_reset_event_log(st
     writel(entry, iommu->mmio_base+IOMMU_STATUS_MMIO_OFFSET);
 
     /*reset event log base address */
-    iommu->event_log_head = 0;
+    iommu->event_log.head = 0;
 
     set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
 }
@@ -605,71 +605,82 @@ static void enable_iommu(struct amd_iomm
 
 }
 
-static void __init deallocate_iommu_table_struct(
-    struct table_struct *table)
+static void __init deallocate_buffer(void *buf, uint32_t sz)
 {
     int order = 0;
-    if ( table->buffer )
+    if ( buf )
     {
-        order = get_order_from_bytes(table->alloc_size);
-        __free_amd_iommu_tables(table->buffer, order);
-        table->buffer = NULL;
+        order = get_order_from_bytes(sz);
+        __free_amd_iommu_tables(buf, order);
     }
 }
 
-static int __init allocate_iommu_table_struct(struct table_struct *table,
-                                              const char *name)
+static void __init deallocate_device_table(struct table_struct *table)
 {
-    int order = 0;
-    if ( table->buffer == NULL )
-    {
-        order = get_order_from_bytes(table->alloc_size);
-        table->buffer = __alloc_amd_iommu_tables(order);
-
-        if ( table->buffer == NULL )
-        {
-            AMD_IOMMU_DEBUG("Error allocating %s\n", name);
-            return -ENOMEM;
-        }
-        memset(table->buffer, 0, PAGE_SIZE * (1UL << order));
-    }
-    return 0;
+    deallocate_buffer(table->buffer, table->alloc_size);
+    table->buffer = NULL;
 }
 
-static int __init allocate_cmd_buffer(struct amd_iommu *iommu)
+static void __init deallocate_ring_buffer(struct ring_buffer *ring_buf)
+{
+    deallocate_buffer(ring_buf->buffer, ring_buf->alloc_size);
+    ring_buf->buffer = NULL;
+    ring_buf->head = 0;
+    ring_buf->tail = 0;
+}
+
+static void * __init allocate_buffer(uint32_t alloc_size, const char *name)
+{
+    void * buffer;
+    int order = get_order_from_bytes(alloc_size);
+
+    buffer = __alloc_amd_iommu_tables(order);
+
+    if ( buffer == NULL )
+    {
+        AMD_IOMMU_DEBUG("Error allocating %s\n", name);
+        return NULL;
+    }
+
+    memset(buffer, 0, PAGE_SIZE * (1UL << order));
+    return buffer;
+}
+
+static void * __init allocate_ring_buffer(struct ring_buffer *ring_buf,
+                                          uint32_t entry_size, 
+                                          uint64_t entries, const char *name)
+{
+    ring_buf->head = 0;
+    ring_buf->tail = 0;
+
+    ring_buf->alloc_size = PAGE_SIZE << get_order_from_bytes(entries * 
+                                                             entry_size);
+    ring_buf->entries = ring_buf->alloc_size / entry_size;
+    ring_buf->buffer = allocate_buffer(ring_buf->alloc_size, name);
+    return ring_buf->buffer;
+}
+
+static void * __init allocate_cmd_buffer(struct amd_iommu *iommu)
 {
     /* allocate 'command buffer' in power of 2 increments of 4K */
-    iommu->cmd_buffer_tail = 0;
-    iommu->cmd_buffer.alloc_size = PAGE_SIZE <<
-                                   get_order_from_bytes(
-                                   PAGE_ALIGN(IOMMU_CMD_BUFFER_DEFAULT_ENTRIES
-                                              * IOMMU_CMD_BUFFER_ENTRY_SIZE));
-    iommu->cmd_buffer.entries = iommu->cmd_buffer.alloc_size /
-                                IOMMU_CMD_BUFFER_ENTRY_SIZE;
-
-    return (allocate_iommu_table_struct(&iommu->cmd_buffer, "Command Buffer"));
+    return allocate_ring_buffer(&iommu->cmd_buffer, sizeof(cmd_entry_t),
+                                IOMMU_CMD_BUFFER_DEFAULT_ENTRIES, 
+                                "Command Buffer");
 }
 
-static int __init allocate_event_log(struct amd_iommu *iommu)
+static void * __init allocate_event_log(struct amd_iommu *iommu)
 {
-   /* allocate 'event log' in power of 2 increments of 4K */
-    iommu->event_log_head = 0;
-    iommu->event_log.alloc_size = PAGE_SIZE <<
-                                  get_order_from_bytes(
-                                  PAGE_ALIGN(IOMMU_EVENT_LOG_DEFAULT_ENTRIES *
-                                  IOMMU_EVENT_LOG_ENTRY_SIZE));
-    iommu->event_log.entries = iommu->event_log.alloc_size /
-                               IOMMU_EVENT_LOG_ENTRY_SIZE;
-
-    return (allocate_iommu_table_struct(&iommu->event_log, "Event Log"));
+    /* allocate 'event log' in power of 2 increments of 4K */
+    return allocate_ring_buffer(&iommu->event_log, sizeof(event_entry_t),
+                                IOMMU_EVENT_LOG_DEFAULT_ENTRIES, "Event Log");
 }
 
 static int __init amd_iommu_init_one(struct amd_iommu *iommu)
 {
-    if ( allocate_cmd_buffer(iommu) != 0 )
+    if ( allocate_cmd_buffer(iommu) == NULL )
         goto error_out;
 
-    if ( allocate_event_log(iommu) != 0 )
+    if ( allocate_event_log(iommu) == NULL )
         goto error_out;
 
     if ( map_iommu_mmio_region(iommu) != 0 )
@@ -708,8 +719,8 @@ static void __init amd_iommu_init_cleanu
         list_del(&iommu->list);
         if ( iommu->enabled )
         {
-            deallocate_iommu_table_struct(&iommu->cmd_buffer);
-            deallocate_iommu_table_struct(&iommu->event_log);
+            deallocate_ring_buffer(&iommu->cmd_buffer);
+            deallocate_ring_buffer(&iommu->event_log);
             unmap_iommu_mmio_region(iommu);
         }
         xfree(iommu);
@@ -719,7 +730,7 @@ static void __init amd_iommu_init_cleanu
     iterate_ivrs_entries(amd_iommu_free_intremap_table);
 
     /* free device table */
-    deallocate_iommu_table_struct(&device_table);
+    deallocate_device_table(&device_table);
 
     /* free ivrs_mappings[] */
     radix_tree_destroy(&ivrs_maps, xfree);
@@ -830,8 +841,10 @@ static int __init amd_iommu_setup_device
     device_table.entries = device_table.alloc_size /
                            IOMMU_DEV_TABLE_ENTRY_SIZE;
 
-    if ( allocate_iommu_table_struct(&device_table, "Device Table") != 0 )
-         return -ENOMEM;
+    device_table.buffer = allocate_buffer(device_table.alloc_size, 
+                                          "Device Table");
+    if  ( device_table.buffer == NULL )
+        return -ENOMEM;
 
     /* Add device table entries */
     for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
diff -r 5b2676ac1321 -r 9c9ddf2dd700 xen/include/asm-x86/amd-iommu.h
--- a/xen/include/asm-x86/amd-iommu.h   Mon Jan 09 16:01:44 2012 +0100
+++ b/xen/include/asm-x86/amd-iommu.h   Tue Jan 10 17:39:36 2012 +0100
@@ -30,12 +30,42 @@
 
 extern struct list_head amd_iommu_head;
 
+#pragma pack(1)
+typedef struct event_entry
+{
+    uint32_t data[4];
+} event_entry_t;
+
+typedef struct ppr_entry
+{
+    uint32_t data[4];
+} ppr_entry_t;
+
+typedef struct cmd_entry
+{
+    uint32_t data[4];
+} cmd_entry_t;
+
+typedef struct dev_entry
+{
+    uint32_t data[8];
+} dev_entry_t;
+#pragma pack()
+
 struct table_struct {
     void *buffer;
     unsigned long entries;
     unsigned long alloc_size;
 };
 
+struct ring_buffer {
+    void *buffer;
+    unsigned long entries;
+    unsigned long alloc_size;
+    uint32_t tail;
+    uint32_t head;
+};
+
 typedef struct iommu_cap {
     uint32_t header;                    /* offset 00h */
     uint32_t base_low;                  /* offset 04h */
@@ -60,10 +90,8 @@ struct amd_iommu {
     unsigned long mmio_base_phys;
 
     struct table_struct dev_table;
-    struct table_struct cmd_buffer;
-    u32 cmd_buffer_tail;
-    struct table_struct event_log;
-    u32 event_log_head;
+    struct ring_buffer cmd_buffer;
+    struct ring_buffer event_log;
 
     int exclusion_enable;
     int exclusion_allow_all;
