# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1192691054 -32400
# Node ID e57d44a3fa2a53c34a6d282c50be2473d19363a2
# Parent c3d213ef8e7cab3150c84cbd84894221fbf614df
hvm domain io page clean up.
- clean up the set_hvm_param hypercall.
- take a reference on the io pages; their reference counts were previously never incremented.
- make buffered pio SMP safe.
- clean up the get_vio() parameters: take a vcpu instead of (domain, vcpu_id).
PATCHNAME: vti_domain_iopage_clean_up
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
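For reference, the heart of the change is the new struct vmx_ioreq_page, which
replaces the bare *_va fields with a (lock, page, va) triple.  The sketch below
condenses the intended usage from the hunks that follow; it assumes the usual
Xen/ia64 primitives (spin_lock, get_page/put_page, lookup_noalloc_domain_pte,
mfn_to_virt) and is illustrative only, not a substitute for the diff:

    /* Illustrative sketch -- see the real hunks below for the full logic. */
    struct vmx_ioreq_page {
        spinlock_t        lock;   /* serialises va/page updates and buffered io */
        struct page_info *page;   /* guest page we hold a reference on */
        void             *va;     /* hypervisor mapping; NULL until set_param */
    };

    /* HVMOP_set_param path: translate the gpfn, take a page reference,
     * then publish the mapping exactly once. */
    int vmx_set_ioreq_page(struct domain *d, struct vmx_ioreq_page *iorp,
                           unsigned long gpfn)
    {
        pte_t pte = *lookup_noalloc_domain_pte(d, gpfn << PAGE_SHIFT);
        unsigned long mfn;
        struct page_info *page;

        if (!pte_present(pte) || !pte_mem(pte))
            return -EINVAL;
        mfn = (pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT;
        page = mfn_to_page(mfn);
        if (get_page(page, d) == 0)            /* the missing refcount */
            return -EINVAL;

        spin_lock(&iorp->lock);
        if (iorp->va != NULL || d->is_dying) { /* already set, or tearing down */
            spin_unlock(&iorp->lock);
            put_page(page);
            return -EINVAL;
        }
        iorp->va   = mfn_to_virt(mfn);
        iorp->page = page;
        spin_unlock(&iorp->lock);
        return 0;
    }

    /* Domain teardown: drop the reference taken above. */
    static void vmx_destroy_ioreq_page(struct domain *d,
                                       struct vmx_ioreq_page *iorp)
    {
        spin_lock(&iorp->lock);
        if (iorp->va != NULL) {
            put_page(iorp->page);
            iorp->page = NULL;
            iorp->va   = NULL;
        }
        spin_unlock(&iorp->lock);
    }

The same iorp->lock is what vmx_ide_pio_intercept() now takes around the
buffered pio pointer updates, which is the SMP-safety fix.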
diff -r c3d213ef8e7c -r e57d44a3fa2a xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c Mon Oct 22 12:42:59 2007 +0900
+++ b/xen/arch/ia64/vmx/mmio.c Thu Oct 18 16:04:14 2007 +0900
@@ -57,7 +57,7 @@ static int hvm_buffered_io_intercept(ior
struct vcpu *v = current;
spinlock_t *buffered_io_lock;
buffered_iopage_t *buffered_iopage =
- (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
+ (buffered_iopage_t *)(v->domain->arch.hvm_domain.buf_ioreq.va);
unsigned long tmp_write_pointer = 0;
int i;
@@ -75,7 +75,7 @@ static int hvm_buffered_io_intercept(ior
if ( i == HVM_BUFFERED_IO_RANGE_NR )
return 0;
- buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
+ buffered_io_lock = &v->domain->arch.hvm_domain.buf_ioreq.lock;
spin_lock(buffered_io_lock);
if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
@@ -109,7 +109,7 @@ static void low_mmio_access(VCPU *vcpu,
vcpu_iodata_t *vio;
ioreq_t *p;
- vio = get_vio(v->domain, v->vcpu_id);
+ vio = get_vio(v);
if (vio == 0) {
panic_domain(NULL,"bad shared page: %lx", (unsigned long)vio);
}
@@ -140,7 +140,8 @@ static int vmx_ide_pio_intercept(ioreq_t
static int vmx_ide_pio_intercept(ioreq_t *p, u64 *val)
{
struct buffered_piopage *pio_page =
- (void *)(current->domain->arch.hvm_domain.buffered_pio_va);
+ (void *)(current->domain->arch.hvm_domain.buf_pioreq.va);
+ spinlock_t *pio_lock;
struct pio_buffer *piobuf;
uint32_t pointer, page_offset;
@@ -154,14 +155,17 @@ static int vmx_ide_pio_intercept(ioreq_t
if (p->size != 2 && p->size != 4)
return 0;
+ pio_lock = &current->domain->arch.hvm_domain.buf_pioreq.lock;
+ spin_lock(pio_lock);
+
pointer = piobuf->pointer;
page_offset = piobuf->page_offset;
/* sanity check */
if (page_offset + pointer < offsetof(struct buffered_piopage, buffer))
- return 0;
+ goto unlock_out;
if (page_offset + piobuf->data_end > PAGE_SIZE)
- return 0;
+ goto unlock_out;
if (pointer + p->size < piobuf->data_end) {
uint8_t *bufp = (uint8_t *)pio_page + page_offset + pointer;
@@ -179,10 +183,15 @@ static int vmx_ide_pio_intercept(ioreq_t
}
}
piobuf->pointer += p->size;
+ spin_unlock(pio_lock);
+
p->state = STATE_IORESP_READY;
vmx_io_assist(current);
return 1;
}
+
+ unlock_out:
+ spin_unlock(pio_lock);
return 0;
}
@@ -224,7 +233,7 @@ static void legacy_io_access(VCPU *vcpu,
vcpu_iodata_t *vio;
ioreq_t *p;
- vio = get_vio(v->domain, v->vcpu_id);
+ vio = get_vio(v);
if (vio == 0) {
panic_domain(NULL,"bad shared page\n");
}
diff -r c3d213ef8e7c -r e57d44a3fa2a xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Mon Oct 22 12:42:59 2007 +0900
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Thu Oct 18 16:04:14 2007 +0900
@@ -133,8 +133,34 @@ do_hvm_op(unsigned long op, XEN_GUEST_HA
return -EPERM;
if (op == HVMOP_set_param) {
- d->arch.hvm_domain.params[a.index] = a.value;
- rc = 0;
+ struct vmx_ioreq_page *iorp;
+ struct vcpu *v;
+
+ switch (a.index) {
+ case HVM_PARAM_IOREQ_PFN:
+ iorp = &d->arch.hvm_domain.ioreq;
+ rc = vmx_set_ioreq_page(d, iorp, a.value);
+ spin_lock(&iorp->lock);
+ if ( (rc == 0) && (iorp->va != NULL) )
+ /* Initialise evtchn port info if VCPUs already created. */
+ for_each_vcpu(d, v)
+ get_vio(v)->vp_eport = v->arch.arch_vmx.xen_port;
+ spin_unlock(&iorp->lock);
+ break;
+ case HVM_PARAM_BUFIOREQ_PFN:
+ iorp = &d->arch.hvm_domain.buf_ioreq;
+ rc = vmx_set_ioreq_page(d, iorp, a.value);
+ break;
+ case HVM_PARAM_BUFPIOREQ_PFN:
+ iorp = &d->arch.hvm_domain.buf_pioreq;
+ rc = vmx_set_ioreq_page(d, iorp, a.value);
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+ if (rc == 0)
+ d->arch.hvm_domain.params[a.index] = a.value;
}
else {
a.value = d->arch.hvm_domain.params[a.index];
diff -r c3d213ef8e7c -r e57d44a3fa2a xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c Mon Oct 22 12:42:59 2007 +0900
+++ b/xen/arch/ia64/vmx/vmx_init.c Thu Oct 18 16:04:14 2007 +0900
@@ -265,22 +265,44 @@ vmx_load_state(struct vcpu *v)
* anchored in vcpu */
}
-static void vmx_create_event_channels(struct vcpu *v)
-{
- vcpu_iodata_t *p;
+static int
+vmx_vcpu_initialise(struct vcpu *v)
+{
+ struct vmx_ioreq_page *iorp = &v->domain->arch.hvm_domain.ioreq;
+
+ int rc = alloc_unbound_xen_event_channel(v, 0);
+ if (rc < 0)
+ return rc;
+ v->arch.arch_vmx.xen_port = rc;
+
+ spin_lock(&iorp->lock);
+ if (v->domain->arch.vmx_platform.ioreq.va != 0) {
+ vcpu_iodata_t *p = get_vio(v);
+ p->vp_eport = v->arch.arch_vmx.xen_port;
+ }
+ spin_unlock(&iorp->lock);
+
+ gdprintk(XENLOG_INFO, "Allocated port %ld for hvm %d vcpu %d.\n",
+ v->arch.arch_vmx.xen_port, v->domain->domain_id, v->vcpu_id);
+
+ return 0;
+}
+
+static int vmx_create_event_channels(struct vcpu *v)
+{
struct vcpu *o;
if (v->vcpu_id == 0) {
/* Ugly: create event channels for every vcpu when vcpu 0
starts, so that they're available for ioemu to bind to. */
for_each_vcpu(v->domain, o) {
- p = get_vio(v->domain, o->vcpu_id);
- o->arch.arch_vmx.xen_port = p->vp_eport =
- alloc_unbound_xen_event_channel(o, 0);
- gdprintk(XENLOG_INFO, "Allocated port %ld for hvm.\n",
- o->arch.arch_vmx.xen_port);
+ int rc = vmx_vcpu_initialise(o);
+ if (rc < 0) //XXX error recovery
+ return rc;
}
}
+
+ return 0;
}
/*
@@ -290,6 +312,67 @@ static void vmx_release_assist_channel(s
static void vmx_release_assist_channel(struct vcpu *v)
{
return;
+}
+
+/* following three functions are based on hvm_xxx_ioreq_page()
+ * in xen/arch/x86/hvm/hvm.c */
+static void vmx_init_ioreq_page(
+ struct domain *d, struct vmx_ioreq_page *iorp)
+{
+ memset(iorp, 0, sizeof(*iorp));
+ spin_lock_init(&iorp->lock);
+ domain_pause(d);
+}
+
+static void vmx_destroy_ioreq_page(
+ struct domain *d, struct vmx_ioreq_page *iorp)
+{
+ spin_lock(&iorp->lock);
+
+ ASSERT(d->is_dying);
+
+ if (iorp->va != NULL) {
+ put_page(iorp->page);
+ iorp->page = NULL;
+ iorp->va = NULL;
+ }
+
+ spin_unlock(&iorp->lock);
+}
+
+int vmx_set_ioreq_page(
+ struct domain *d, struct vmx_ioreq_page *iorp, unsigned long gpfn)
+{
+ struct page_info *page;
+ unsigned long mfn;
+ pte_t pte;
+
+ pte = *lookup_noalloc_domain_pte(d, gpfn << PAGE_SHIFT);
+ if (!pte_present(pte) || !pte_mem(pte))
+ return -EINVAL;
+ mfn = (pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT;
+ ASSERT(mfn_valid(mfn));
+
+ page = mfn_to_page(mfn);
+ if (get_page(page, d) == 0)
+ return -EINVAL;
+
+ spin_lock(&iorp->lock);
+
+ if ((iorp->va != NULL) || d->is_dying) {
+ spin_unlock(&iorp->lock);
+ put_page(page);
+ return -EINVAL;
+ }
+
+ iorp->va = mfn_to_virt(mfn);
+ iorp->page = page;
+
+ spin_unlock(&iorp->lock);
+
+ domain_unpause(d);
+
+ return 0;
}
/*
@@ -318,7 +401,10 @@ vmx_final_setup_guest(struct vcpu *v)
rc = init_domain_tlb(v);
if (rc)
return rc;
- vmx_create_event_channels(v);
+
+ rc = vmx_create_event_channels(v);
+ if (rc)
+ return rc;
/* v->arch.schedule_tail = arch_vmx_do_launch; */
vmx_create_vp(v);
@@ -346,6 +432,10 @@ vmx_relinquish_guest_resources(struct do
vmx_release_assist_channel(v);
vacpi_relinquish_resources(d);
+
+ vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.ioreq);
+ vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
+ vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
}
void
@@ -391,26 +481,14 @@ static void vmx_build_io_physmap_table(s
int vmx_setup_platform(struct domain *d)
{
- unsigned long mpa;
ASSERT(d != dom0); /* only for non-privileged vti domain */
vmx_build_io_physmap_table(d);
- mpa = __gpa_to_mpa(d, IO_PAGE_START);
- if (mpa == 0)
- return -EINVAL;
- d->arch.vmx_platform.shared_page_va = (unsigned long)__va(mpa);
- /* For buffered IO requests. */
- spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
-
- mpa = __gpa_to_mpa(d, BUFFER_IO_PAGE_START);
- if (mpa == 0)
- return -EINVAL;
- d->arch.hvm_domain.buffered_io_va = (unsigned long)__va(mpa);
- mpa = __gpa_to_mpa(d, BUFFER_PIO_PAGE_START);
- if (mpa == 0)
- return -EINVAL;
- d->arch.hvm_domain.buffered_pio_va = (unsigned long)__va(mpa);
+ vmx_init_ioreq_page(d, &d->arch.vmx_platform.ioreq);
+ vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
+ vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
+
/* TEMP */
d->arch.vmx_platform.pib_base = 0xfee00000UL;
@@ -439,7 +517,7 @@ void vmx_do_resume(struct vcpu *v)
/* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
/* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
- p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+ p = &get_vio(v)->vp_ioreq;
while (p->state != STATE_IOREQ_NONE) {
switch (p->state) {
case STATE_IORESP_READY: /* IORESP_READY -> NONE */
diff -r c3d213ef8e7c -r e57d44a3fa2a xen/arch/ia64/vmx/vmx_support.c
--- a/xen/arch/ia64/vmx/vmx_support.c Mon Oct 22 12:42:59 2007 +0900
+++ b/xen/arch/ia64/vmx/vmx_support.c Thu Oct 18 16:04:14 2007 +0900
@@ -42,7 +42,7 @@ void vmx_io_assist(struct vcpu *v)
* This shared page contains I/O request between emulation code
* and device model.
*/
- vio = get_vio(v->domain, v->vcpu_id);
+ vio = get_vio(v);
if (!vio)
panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n",
(unsigned long)vio);
@@ -65,7 +65,7 @@ void vmx_send_assist_req(struct vcpu *v)
{
ioreq_t *p;
- p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+ p = &get_vio(v)->vp_ioreq;
if (unlikely(p->state != STATE_IOREQ_NONE)) {
/* This indicates a bug in the device model. Crash the
domain. */
diff -r c3d213ef8e7c -r e57d44a3fa2a xen/include/asm-ia64/vmx.h
--- a/xen/include/asm-ia64/vmx.h Mon Oct 22 12:42:59 2007 +0900
+++ b/xen/include/asm-ia64/vmx.h Thu Oct 18 16:04:14 2007 +0900
@@ -57,8 +57,12 @@ extern void deliver_pal_init(struct vcpu
extern void deliver_pal_init(struct vcpu *vcpu);
extern void vmx_pend_pal_init(struct domain *d);
-static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
+static inline vcpu_iodata_t *get_vio(struct vcpu *v)
{
- return &((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
+ struct domain *d = v->domain;
+ shared_iopage_t *p = (shared_iopage_t *)d->arch.vmx_platform.ioreq.va;
+ ASSERT((v == current) || spin_is_locked(&d->arch.vmx_platform.ioreq.lock));
+ ASSERT(d->arch.vmx_platform.ioreq.va != NULL);
+ return &p->vcpu_iodata[v->vcpu_id];
}
#endif /* _ASM_IA64_VT_H */
diff -r c3d213ef8e7c -r e57d44a3fa2a xen/include/asm-ia64/vmx_platform.h
--- a/xen/include/asm-ia64/vmx_platform.h Mon Oct 22 12:42:59 2007 +0900
+++ b/xen/include/asm-ia64/vmx_platform.h Thu Oct 18 16:04:14 2007 +0900
@@ -43,17 +43,24 @@
* it is not used on ia64 */
#define OS_TYPE_PORT 0xB2
+struct vmx_ioreq_page {
+ spinlock_t lock;
+ struct page_info *page;
+ void *va;
+};
+int vmx_set_ioreq_page(struct domain *d,
+ struct vmx_ioreq_page *iorp, unsigned long gmfn);
+
typedef struct virtual_platform_def {
- unsigned long gos_type;
- unsigned long buffered_io_va;
- spinlock_t buffered_io_lock;
- unsigned long buffered_pio_va;
- unsigned long shared_page_va;
- unsigned long pib_base;
- unsigned long params[HVM_NR_PARAMS];
+ unsigned long gos_type;
+ struct vmx_ioreq_page ioreq;
+ struct vmx_ioreq_page buf_ioreq;
+ struct vmx_ioreq_page buf_pioreq;
+ unsigned long pib_base;
+ unsigned long params[HVM_NR_PARAMS];
/* One IOSAPIC now... */
- struct viosapic viosapic;
- struct vacpi vacpi;
+ struct viosapic viosapic;
+ struct vacpi vacpi;
} vir_plat_t;
static inline int __fls(uint32_t word)