[PATCH V3 02/23] x86/ioreq: Add IOREQ_STATUS_* #define-s and update code for moving
From: Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx>
This patch continues the preparation of x86/hvm/ioreq.c before
moving it to the common code.
Add IOREQ_STATUS_* #define-s and switch the code that is a candidate
for moving over to them, since X86EMUL_* shouldn't be exposed to the
common code in that form.
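For illustration, a minimal sketch of the intent (the helper below is
hypothetical and not part of this patch): once the senders live in the
common code, callers only ever test the neutral IOREQ_STATUS_* values,
and each arch header decides what those values map to underneath.

    /* Hypothetical common-code caller (sketch only, not from this patch). */
    static bool forward_to_emulator(struct hvm_ioreq_server *s, ioreq_t *p)
    {
        /* hvm_send_ioreq() now returns one of the IOREQ_STATUS_* values. */
        int rc = hvm_send_ioreq(s, p, false /* not buffered */);

        if ( rc == IOREQ_STATUS_RETRY )
            return true; /* the vCPU has to wait for the emulator's reply */

        return rc == IOREQ_STATUS_HANDLED;
    }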
This support is going to be used on Arm to be able to run device
emulators outside of the Xen hypervisor.
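As a sketch of where this is heading (the Arm-side IO_* names below are
an assumption and are not introduced by this patch), each per-arch
header would then provide its own mapping:

    #if defined(CONFIG_X86)
    /* This patch: map the neutral names onto x86's emulation statuses. */
    # define IOREQ_STATUS_HANDLED   X86EMUL_OKAY
    # define IOREQ_STATUS_UNHANDLED X86EMUL_UNHANDLEABLE
    # define IOREQ_STATUS_RETRY     X86EMUL_RETRY
    #elif defined(CONFIG_ARM)
    /* Hypothetical Arm counterpart, assuming Arm's IO_* handler statuses. */
    # define IOREQ_STATUS_HANDLED   IO_HANDLED
    # define IOREQ_STATUS_UNHANDLED IO_UNHANDLED
    # define IOREQ_STATUS_RETRY     IO_RETRY
    #endif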
Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>
---
Please note, this is a split/cleanup/hardening of Julien's PoC:
"Add support for Guest IO forwarding to a device emulator"
Changes V2 -> V3:
- new patch, was split from
[PATCH V2 01/23] x86/ioreq: Prepare IOREQ feature for making it common
---
xen/arch/x86/hvm/ioreq.c | 16 ++++++++--------
xen/include/asm-x86/hvm/ioreq.h | 4 ++++
2 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index e3dfb49..9525554 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -1400,7 +1400,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
pg = iorp->va;
if ( !pg )
- return X86EMUL_UNHANDLEABLE;
+ return IOREQ_STATUS_UNHANDLED;
/*
* Return 0 for the cases we can't deal with:
@@ -1430,7 +1430,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
break;
default:
gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
- return X86EMUL_UNHANDLEABLE;
+ return IOREQ_STATUS_UNHANDLED;
}
spin_lock(&s->bufioreq_lock);
@@ -1440,7 +1440,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
{
/* The queue is full: send the iopacket through the normal path. */
spin_unlock(&s->bufioreq_lock);
- return X86EMUL_UNHANDLEABLE;
+ return IOREQ_STATUS_UNHANDLED;
}
pg->buf_ioreq[pg->ptrs.write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;
@@ -1471,7 +1471,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
notify_via_xen_event_channel(d, s->bufioreq_evtchn);
spin_unlock(&s->bufioreq_lock);
- return X86EMUL_OKAY;
+ return IOREQ_STATUS_HANDLED;
}
int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
@@ -1487,7 +1487,7 @@ int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
return hvm_send_buffered_ioreq(s, proto_p);
if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
- return X86EMUL_RETRY;
+ return IOREQ_STATUS_RETRY;
list_for_each_entry ( sv,
&s->ioreq_vcpu_list,
@@ -1527,11 +1527,11 @@ int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
notify_via_xen_event_channel(d, port);
sv->pending = true;
- return X86EMUL_RETRY;
+ return IOREQ_STATUS_RETRY;
}
}
- return X86EMUL_UNHANDLEABLE;
+ return IOREQ_STATUS_UNHANDLED;
}
unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
@@ -1545,7 +1545,7 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
if ( !s->enabled )
continue;
- if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
+ if ( hvm_send_ioreq(s, p, buffered) == IOREQ_STATUS_UNHANDLED )
failed++;
}
diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
index cc79285..e9c8b2d 100644
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -74,6 +74,10 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered);
void hvm_ioreq_init(struct domain *d);
+#define IOREQ_STATUS_HANDLED X86EMUL_OKAY
+#define IOREQ_STATUS_UNHANDLED X86EMUL_UNHANDLEABLE
+#define IOREQ_STATUS_RETRY X86EMUL_RETRY
+
#endif /* __ASM_X86_HVM_IOREQ_H__ */
/*
--
2.7.4