[Xen-ia64-devel] [PATCH][RFC][IA64] Accelerate IDE PIO on HVM/IA64

To: xen-devel@xxxxxxxxxxxxxxxxxxx, xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-ia64-devel] [PATCH][RFC][IA64] Accelerate IDE PIO on HVM/IA64
From: Kouya SHIMURA <kouya@xxxxxxxxxxxxxx>
Date: Mon, 4 Dec 2006 11:27:51 +0900
This patch significantly accelerates IDE PIO on HVM/IA64:
* reduces the installation time of Windows 2003 Server
  from 10 hours(!) to 50 minutes.
* accelerates the Windows crash-dump speed from 40KB/s
  (over three hours for a 512MB guest) to 850KB/s.

The sole reason for the slowness above is the overhead of IDE PIO.
Of course Windows should use DMA mode, but that is not something we
can control from the host. (FYI: once installed, Windows usually uses
DMA mode.)

On the other hand, the x86 architecture is spared this issue: its string
I/O (CISC) instructions let qemu-dm process multiple PIO requests in one
transaction. So this patch gives no benefit on x86.
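
To make the difference concrete, here is an illustrative sketch (not
part of the patch; inw() below is only a placeholder for the real port
read): a guest reading one 512-byte sector in 16-bit PIO mode issues
256 accesses to the data port. On x86 the whole loop can be a single
"rep insw", which reaches qemu-dm as one batched request; on IA64 every
iteration is a separate trap and, without this patch, a separate round
trip to qemu-dm.

#include <stdint.h>

#define IDE_DATA_PORT 0x1f0
#define SECTOR_SIZE   512

static uint16_t inw(uint16_t port)   /* placeholder for the real port read */
{
    (void)port;
    return 0;
}

static void read_sector_pio(uint16_t *dst)
{
    int i;
    for (i = 0; i < SECTOR_SIZE / 2; i++)
        dst[i] = inw(IDE_DATA_PORT); /* 256 separate PIO reads per sector */
}

int main(void)
{
    uint16_t sector[SECTOR_SIZE / 2];
    read_sector_pio(sector);
    return 0;
}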

There are some dirty hacks in this patch:
* To begin with, is it acceptable to delegate part of qemu-dm's
  processing to the hypervisor?
* Currently it uses the remnant of the buffered_iopage (used for VGA);
  see the layout sketch after this list. Maybe I should prepare a
  separate page for IDE PIO.
* May I use "#ifdef __ia64__" ?
* and so on.
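
For reference, here is a rough sanity-check sketch of the page layout I
am assuming (the buffered_iopage size below is a placeholder, not the
real sizeof; the real definitions are in the ioreq.h hunk at the end of
the patch):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_IA64      (16 * 1024)          /* assumed 16KB page */
#define IDE_PIO_BUFFER_SIZE 4096
#define IDE_PIO_OFFSET(sz)  (((sz) + 7) & ~7UL)  /* same rounding as the patch */

struct ide_pio_state {
    uint32_t buf_idx;
    uint32_t buf_size;
    uint8_t  buffer[IDE_PIO_BUFFER_SIZE];
};

int main(void)
{
    /* placeholder: the real value is sizeof(buffered_iopage_t) */
    unsigned long sizeof_buffered_iopage = 4096;
    unsigned long off = IDE_PIO_OFFSET(sizeof_buffered_iopage);
    /* two slots: [0] primary channel (0x1f0), [1] secondary (0x170) */
    unsigned long end = off + 2 * sizeof(struct ide_pio_state);

    printf("ide_pio_state[] starts at offset %lu and %s in a %d-byte page\n",
           off, end <= PAGE_SIZE_IA64 ? "fits" : "does not fit",
           PAGE_SIZE_IA64);
    return 0;
}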

Please show me the right way.

Thanks,
Kouya

Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>

diff -r 3bd721db6db5 tools/ioemu/hw/ide.c
--- a/tools/ioemu/hw/ide.c      Sun Dec 03 17:44:14 2006 +0000
+++ b/tools/ioemu/hw/ide.c      Mon Dec 04 09:30:42 2006 +0900
@@ -23,6 +23,10 @@
  */
 #include "vl.h"
 #include <pthread.h>
+
+#ifdef __ia64__
+#include <xen/hvm/ioreq.h>
+#endif
 
 /* debug IDE devices */
 //#define DEBUG_IDE
@@ -349,6 +353,9 @@ typedef struct IDEState {
     uint8_t io_buffer[MAX_MULT_SECTORS*512 + 4];
     QEMUTimer *sector_write_timer; /* only used for win2k instal hack */
     uint32_t irq_count; /* counts IRQs when using win2k install hack */
+#ifdef __ia64__
+    ide_pio_state_t *buffered_ide_pio; 
+#endif
 } IDEState;
 
 #define BM_STATUS_DMAING 0x01
@@ -618,6 +625,17 @@ static void ide_transfer_start(IDEState 
     s->data_ptr = buf;
     s->data_end = buf + size;
     s->status |= DRQ_STAT;
+#ifdef __ia64__
+    if (s->buffered_ide_pio) {
+       if (size > IDE_PIO_BUFFER_SIZE) {
+           fprintf(stderr, "CDROM read size:%d\n", size);
+           size = IDE_PIO_BUFFER_SIZE;
+       }
+       s->buffered_ide_pio->buf_idx = 0;
+       s->buffered_ide_pio->buf_size = size;
+       memcpy(s->buffered_ide_pio->buffer, s->data_ptr, size);
+    }
+#endif
 }
 
 static void ide_transfer_stop(IDEState *s)
@@ -626,6 +644,12 @@ static void ide_transfer_stop(IDEState *
     s->data_ptr = s->io_buffer;
     s->data_end = s->io_buffer;
     s->status &= ~DRQ_STAT;
+#ifdef __ia64__
+    if (s->buffered_ide_pio) {
+       s->buffered_ide_pio->buf_idx = 0;
+       s->buffered_ide_pio->buf_size = 0;
+    }
+#endif
 }
 
 static int64_t ide_get_sector(IDEState *s)
@@ -1961,6 +1985,86 @@ static uint32_t ide_data_readl(void *opa
     return ret;
 }
 
+#ifdef __ia64__
+static void buffered_ide_data_write(void *opaque, uint32_t addr, uint32_t val, uint32_t size)
+{
+    IDEState *s = ((IDEState *)opaque)->cur_drive;
+    ide_pio_state_t *ide_pio = s->buffered_ide_pio;
+    uint8_t *p;
+    int buf_size;
+
+    memcpy(s->data_ptr, ide_pio->buffer, ide_pio->buf_idx);
+    p = s->data_ptr + ide_pio->buf_idx;
+    if (size == 2)
+       *(uint16_t *)p = le16_to_cpu(val);
+    else /* size == 4 */
+       *(uint32_t *)p = le32_to_cpu(val);
+    p += size;
+    s->data_ptr = p;
+
+    ide_pio->buf_idx = 0;
+    buf_size = s->data_end - p;
+    if (buf_size > 0) {
+       if (buf_size > IDE_PIO_BUFFER_SIZE)
+           buf_size = IDE_PIO_BUFFER_SIZE;
+       ide_pio->buf_size = buf_size;
+    } else {
+       ide_pio->buf_size = 0;
+        s->end_transfer_func(s);
+    }
+}
+
+static uint32_t buffered_ide_data_read(void *opaque, uint32_t addr, uint32_t size)
+{
+    IDEState *s = ((IDEState *)opaque)->cur_drive;
+    ide_pio_state_t *ide_pio = s->buffered_ide_pio;
+    uint8_t *p;
+    int ret;
+    int buf_size;
+
+    p = s->data_ptr + ide_pio->buf_idx;
+    if (size == 2)
+       ret = cpu_to_le16(*(uint16_t *)p);
+    else
+       ret = cpu_to_le32(*(uint32_t *)p);
+    p += size;
+    s->data_ptr = p;
+
+    buf_size = s->data_end - p;
+    if (buf_size > 0) {
+       if (buf_size > IDE_PIO_BUFFER_SIZE)
+           buf_size = IDE_PIO_BUFFER_SIZE;
+       memcpy(ide_pio->buffer, s->data_ptr, buf_size);
+       ide_pio->buf_idx = 0;
+       ide_pio->buf_size = buf_size;
+    } else {
+       ide_pio->buf_size = 0;
+        s->end_transfer_func(s);
+    }
+    return ret;
+}
+
+static void buffered_ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
+{
+    buffered_ide_data_write(opaque, addr, val, 2);
+}
+
+static uint32_t buffered_ide_data_readw(void *opaque, uint32_t addr)
+{
+    return buffered_ide_data_read(opaque, addr, 2);
+}
+
+static void buffered_ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
+{
+    buffered_ide_data_write(opaque, addr, val, 4);
+}
+
+static uint32_t buffered_ide_data_readl(void *opaque, uint32_t addr)
+{
+    return buffered_ide_data_read(opaque, addr, 4);
+}
+#endif
+
 static void ide_dummy_transfer_stop(IDEState *s)
 {
     s->data_ptr = s->io_buffer;
@@ -2121,10 +2225,17 @@ static void ide_init_ioport(IDEState *id
     }
     
     /* data ports */
+#ifdef __ia64__
+    register_ioport_write(iobase, 2, 2, buffered_ide_data_writew, ide_state);
+    register_ioport_read(iobase, 2, 2, buffered_ide_data_readw, ide_state);
+    register_ioport_write(iobase, 4, 4, buffered_ide_data_writel, ide_state);
+    register_ioport_read(iobase, 4, 4, buffered_ide_data_readl, ide_state);
+#else
     register_ioport_write(iobase, 2, 2, ide_data_writew, ide_state);
     register_ioport_read(iobase, 2, 2, ide_data_readw, ide_state);
     register_ioport_write(iobase, 4, 4, ide_data_writel, ide_state);
     register_ioport_read(iobase, 4, 4, ide_data_readl, ide_state);
+#endif
 }
 
 /***********************************************************/
@@ -2495,6 +2606,18 @@ void pci_piix3_ide_init(PCIBus *bus, Blo
               pic_set_irq_new, isa_pic, 15);
     ide_init_ioport(&d->ide_if[0], 0x1f0, 0x3f6);
     ide_init_ioport(&d->ide_if[2], 0x170, 0x376);
+#ifdef __ia64__
+{
+    extern void *buffered_io_page;
+    ide_pio_state_t *ide_pio = (ide_pio_state_t *)(buffered_io_page + IDE_PIO_OFFSET_FROM_BUFFERED_IO);
+    d->ide_if[0].buffered_ide_pio = &ide_pio[0];
+    ide_pio[0].buf_idx = 0;
+    ide_pio[0].buf_size = 0;
+    d->ide_if[2].buffered_ide_pio = &ide_pio[1];
+    ide_pio[1].buf_idx = 0;
+    ide_pio[1].buf_size = 0;
+}
+#endif
 #ifdef DMA_MULTI_THREAD    
     dma_create_thread();
 #endif //DMA_MULTI_THREAD    
diff -r 3bd721db6db5 xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c  Sun Dec 03 17:44:14 2006 +0000
+++ b/xen/arch/ia64/vmx/mmio.c  Mon Dec 04 09:34:53 2006 +0900
@@ -231,6 +231,50 @@ static void low_mmio_access(VCPU *vcpu, 
     }
     return;
 }
+
+int vmx_ide_pio_intercept(ioreq_t *p, u64 *val)
+{
+    struct vcpu *v = current;
+    ide_pio_state_t *ide_pio;
+
+    if (p->addr==0x1f0)                /* primary */
+       ide_pio = (ide_pio_state_t *)((v->domain->arch.hvm_domain.buffered_io_va + IDE_PIO_OFFSET_FROM_BUFFERED_IO));
+    else if (p->addr==0x170)   /* secondary */
+       ide_pio = (ide_pio_state_t *)((v->domain->arch.hvm_domain.buffered_io_va + IDE_PIO_OFFSET_FROM_BUFFERED_IO))+1;
+    else
+       return 0;
+
+    if ((p->size != 2) && (p->size != 4))
+       return 0;
+
+    if (ide_pio->buf_idx + p->size < ide_pio->buf_size) {
+       uint8_t *bufp = &ide_pio->buffer[ide_pio->buf_idx];
+       /* we assume cpu is little endian */
+       if (p->dir == IOREQ_WRITE) {
+           if (likely(p->size == 4 && (((long)bufp & 3) == 0)))
+               *(uint32_t *)bufp = *val;
+           else if (p->size == 2 && (((long)bufp & 1) == 0))
+               *(uint16_t *)bufp = *val;
+           else
+               memcpy(bufp, val, p->size);
+       } else {
+           if (likely(p->size == 4 && (((long)bufp & 3) == 0)))
+               *val = *(uint32_t *)bufp;
+           else if (p->size == 2 && (((long)bufp & 1) == 0))
+               *val = *(uint16_t *)bufp;
+           else {
+               *val = 0;
+               memcpy(val, bufp, p->size);
+           }
+       }
+       ide_pio->buf_idx += p->size;
+       p->state = STATE_IORESP_READY;
+       vmx_io_assist(v);
+       return 1;
+    }
+    return 0;
+}
+
 #define TO_LEGACY_IO(pa)  (((pa)>>12<<2)|((pa)&0x3))
 
 static void legacy_io_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
@@ -255,6 +299,9 @@ static void legacy_io_access(VCPU *vcpu,
     p->df = 0;
 
     p->io_count++;
+
+    if (vmx_ide_pio_intercept(p, val))
+       return;
 
     vmx_send_assist_req(v);
     if(dir==IOREQ_READ){ //read
diff -r 3bd721db6db5 xen/include/public/hvm/ioreq.h
--- a/xen/include/public/hvm/ioreq.h    Sun Dec 03 17:44:14 2006 +0000
+++ b/xen/include/public/hvm/ioreq.h    Mon Dec 04 09:38:55 2006 +0900
@@ -80,6 +80,18 @@ struct buffered_iopage {
 };            /* sizeof this structure must be in one page */
 typedef struct buffered_iopage buffered_iopage_t;
 
+/* ide_pio_state is used on IA64 only.                                   */
+/* On IA64, buffered_iopage occupies only a quarter of one page (16KB).  */
+/* ide_pio_state is allocated in the remnant of the page.                */
+#define IDE_PIO_OFFSET_FROM_BUFFERED_IO ((sizeof(buffered_iopage_t)+7)&~7)
+#define IDE_PIO_BUFFER_SIZE 4096
+struct ide_pio_state {
+    uint32_t buf_idx;
+    uint32_t buf_size;
+    uint8_t  buffer[IDE_PIO_BUFFER_SIZE];
+};
+typedef struct ide_pio_state ide_pio_state_t;
+
 #define ACPI_PM1A_EVT_BLK_ADDRESS           0x0000000000001f40
 #define ACPI_PM1A_CNT_BLK_ADDRESS           (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04)
 #define ACPI_PM_TMR_BLK_ADDRESS             (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08)