[Xen-devel] [PATCH v2 08/17] x86/hvm: split I/O completion handling from state model
The state of in-flight I/O and how its completion will be handled are
logically separate, and conflating the two makes the code unnecessarily
confusing. Hence this patch removes the HVMIO_handle_mmio_awaiting_completion
and HVMIO_handle_pio_awaiting_completion values from enum hvm_io_state and
instead records the required completion action in a new enum
hvm_io_completion field (io_completion) in struct hvm_vcpu_io.
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
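(Not part of the patch: for review convenience, a minimal sketch of the model
after this change, using only the enum/field names introduced below. The
in-flight state and the completion action are now tracked independently; the
completion path shown is simplified from hvm_io_assist().)

    /* What stage an in-flight I/O request has reached. */
    enum hvm_io_state {
        HVMIO_none = 0,
        HVMIO_dispatched,
        HVMIO_awaiting_completion,
        HVMIO_completed
    };

    /* What, if anything, must be done once the device model completes it. */
    enum hvm_io_completion {
        HVMIO_no_completion = 0,
        HVMIO_mmio_completion,
        HVMIO_pio_completion
    };

    /* Simplified shape of the completion path: */
    if ( vio->io_state == HVMIO_awaiting_completion )
    {
        enum hvm_io_completion completion = vio->io_completion;

        vio->io_state = HVMIO_completed;
        vio->io_completion = HVMIO_no_completion;

        if ( completion == HVMIO_mmio_completion )
            (void)handle_mmio(); /* re-enter MMIO emulation */
    }
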
 xen/arch/x86/hvm/hvm.c         | 42 +++++++++++++++++++++++++---------------
 xen/arch/x86/hvm/io.c          |  4 ++--
 xen/include/asm-x86/hvm/vcpu.h | 15 +++++++++-----
 3 files changed, 38 insertions(+), 23 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 799f0e7..916459a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -415,31 +415,41 @@ static void hvm_io_assist(ioreq_t *p)
 {
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
-    enum hvm_io_state io_state;
 
     p->state = STATE_IOREQ_NONE;
 
-    io_state = vio->io_state;
-    vio->io_state = HVMIO_none;
-
-    switch ( io_state )
+    switch ( vio->io_state )
     {
     case HVMIO_awaiting_completion:
+    {
+        enum hvm_io_completion completion = vio->io_completion;
+
         vio->io_state = HVMIO_completed;
         vio->io_data = p->data;
+        vio->io_completion = HVMIO_no_completion;
+
+        switch ( completion )
+        {
+        case HVMIO_mmio_completion:
+            (void)handle_mmio();
+            break;
+
+        case HVMIO_pio_completion:
+            if ( vio->io_size == 4 ) /* Needs zero extension. */
+                guest_cpu_user_regs()->rax = (uint32_t)p->data;
+            else
+                memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
+
+            vio->io_state = HVMIO_none;
+            break;
+        default:
+            break;
+        }
+
         break;
-    case HVMIO_handle_mmio_awaiting_completion:
-        vio->io_state = HVMIO_completed;
-        vio->io_data = p->data;
-        (void)handle_mmio();
-        break;
-    case HVMIO_handle_pio_awaiting_completion:
-        if ( vio->io_size == 4 ) /* Needs zero extension. */
-            guest_cpu_user_regs()->rax = (uint32_t)p->data;
-        else
-            memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
-        break;
+    }
     default:
+        vio->io_state = HVMIO_none;
         break;
     }
 
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index ee0ed82..5527988 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -93,7 +93,7 @@ int handle_mmio(void)
     if ( rc != X86EMUL_RETRY )
         vio->io_state = HVMIO_none;
     if ( vio->io_state == HVMIO_awaiting_completion )
-        vio->io_state = HVMIO_handle_mmio_awaiting_completion;
+        vio->io_completion = HVMIO_mmio_completion;
     else
         vio->mmio_access = (struct npfec){};
 
@@ -158,7 +158,7 @@ int handle_pio(uint16_t port, unsigned int size, int dir)
             return 0;
         /* Completion in hvm_io_assist() with no re-emulation required. */
         ASSERT(dir == IOREQ_READ);
-        vio->io_state = HVMIO_handle_pio_awaiting_completion;
+        vio->io_completion = HVMIO_pio_completion;
         break;
     default:
         gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 97d78bd..87f9c8f 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -34,11 +34,15 @@ enum hvm_io_state {
     HVMIO_none = 0,
     HVMIO_dispatched,
     HVMIO_awaiting_completion,
-    HVMIO_handle_mmio_awaiting_completion,
-    HVMIO_handle_pio_awaiting_completion,
     HVMIO_completed
 };
 
+enum hvm_io_completion {
+    HVMIO_no_completion = 0,
+    HVMIO_mmio_completion,
+    HVMIO_pio_completion
+};
+
 struct hvm_vcpu_asid {
     uint64_t generation;
     uint32_t asid;
@@ -46,9 +50,10 @@ struct hvm_vcpu_asid {
 
 struct hvm_vcpu_io {
     /* I/O request in flight to device model. */
-    enum hvm_io_state   io_state;
-    unsigned long       io_data;
-    int                 io_size;
+    enum hvm_io_state      io_state;
+    unsigned long          io_data;
+    int                    io_size;
+    enum hvm_io_completion io_completion;
 
     /*
      * HVM emulation:
--
1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel