[Xen-devel] [PATCH v4 17/17] x86/hvm: track large memory mapped accesses by buffer offset
The code in hvmemul_do_io() that tracks large reads or writes, to avoid
re-issuing component I/O, is defeated by accesses that cross a page
boundary because it tracks by physical address: the chunks of such an
access need not be physically contiguous, so the continuation check
fails. The code is also only relevant to memory-mapped I/O to or from a
buffer.

This patch refactors the code, moving it into hvmemul_phys_mmio_access()
where it is relevant, and tracks by buffer offset rather than by address.
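As an illustration only (not part of the patch itself), below is a minimal,
self-contained C model of the offset-keyed tracking described above. The
struct and function names (mmio_cache, do_chunk, access_chunk), the guest
physical addresses, and the use of a single cache are hypothetical
simplifications; the real logic lives in hvmemul_phys_mmio_access() and
keeps one cache per ioreq direction (mmio_cache[2]).

/*
 * Illustrative model only, not Xen code: multi-cycle MMIO tracking keyed
 * by buffer offset.  All names and addresses here are hypothetical.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIR_READ  0
#define DIR_WRITE 1

struct mmio_cache {
    unsigned int size;   /* bytes of this access completed so far */
    uint8_t buffer[32];  /* enough for an m256 access */
};

/* Stand-in for the device-model round trip. */
static void do_chunk(uint64_t gpa, int dir, uint8_t *p, unsigned int n)
{
    printf("issue %s of %u bytes at gpa 0x%llx\n",
           dir == DIR_READ ? "read" : "write", n, (unsigned long long)gpa);
    if ( dir == DIR_READ )
        memset(p, 0xab, n);  /* pretend the device returned data */
}

/*
 * Handle one physically contiguous chunk of a possibly page-crossing
 * access.  Chunks completed on an earlier pass are replayed from the
 * cache, keyed by *off, so nothing is re-issued even though the chunks'
 * physical addresses are unrelated.
 */
static void access_chunk(struct mmio_cache *cache, uint64_t gpa, int dir,
                         uint8_t *buffer, unsigned int *off, unsigned int n)
{
    if ( *off + n <= cache->size )
    {
        if ( dir == DIR_READ )
            memcpy(&buffer[*off], &cache->buffer[*off], n); /* replay */
        else
            assert(!memcmp(&buffer[*off], &cache->buffer[*off], n));
    }
    else
    {
        assert(*off == cache->size);
        do_chunk(gpa, dir, &buffer[*off], n);
        memcpy(&cache->buffer[*off], &buffer[*off], n);
        cache->size += n;
    }
    *off += n;
}

int main(void)
{
    struct mmio_cache cache = { 0 };
    uint8_t buf[8];
    unsigned int off;

    /* First pass: an 8-byte read that crosses a page boundary. */
    off = 0;
    access_chunk(&cache, 0x1ffc, DIR_READ, buf, &off, 4);
    access_chunk(&cache, 0x5000, DIR_READ, buf, &off, 4);

    /* A retry of the same instruction replays both chunks silently. */
    off = 0;
    access_chunk(&cache, 0x1ffc, DIR_READ, buf, &off, 4);
    access_chunk(&cache, 0x5000, DIR_READ, buf, &off, 4);

    return 0;
}

Keying the replay by offset into the emulation buffer, rather than by guest
physical address, is what keeps the cache valid across the discontiguous
physical chunks of a page-crossing access.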
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
xen/arch/x86/hvm/emulate.c | 98 ++++++++++++++++------------------------
xen/include/asm-x86/hvm/vcpu.h | 16 ++++---
2 files changed, 48 insertions(+), 66 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index aa68787..4424dfc 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -107,29 +107,6 @@ static int hvmemul_do_io(
return X86EMUL_UNHANDLEABLE;
}
- if ( is_mmio && !data_is_addr )
- {
- /* Part of a multi-cycle read or write? */
- if ( dir == IOREQ_WRITE )
- {
- paddr_t pa = vio->mmio_large_write_pa;
- unsigned int bytes = vio->mmio_large_write_bytes;
- if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
- return X86EMUL_OKAY;
- }
- else
- {
- paddr_t pa = vio->mmio_large_read_pa;
- unsigned int bytes = vio->mmio_large_read_bytes;
- if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
- {
- memcpy(p_data, &vio->mmio_large_read[addr - pa],
- size);
- return X86EMUL_OKAY;
- }
- }
- }
-
switch ( vio->io_req.state )
{
case STATE_IOREQ_NONE:
@@ -209,33 +186,6 @@ static int hvmemul_do_io(
memcpy(p_data, &p.data, size);
}
- if ( is_mmio && !data_is_addr )
- {
- /* Part of a multi-cycle read or write? */
- if ( dir == IOREQ_WRITE )
- {
- paddr_t pa = vio->mmio_large_write_pa;
- unsigned int bytes = vio->mmio_large_write_bytes;
- if ( bytes == 0 )
- pa = vio->mmio_large_write_pa = addr;
- if ( addr == (pa + bytes) )
- vio->mmio_large_write_bytes += size;
- }
- else
- {
- paddr_t pa = vio->mmio_large_read_pa;
- unsigned int bytes = vio->mmio_large_read_bytes;
- if ( bytes == 0 )
- pa = vio->mmio_large_read_pa = addr;
- if ( (addr == (pa + bytes)) &&
- ((bytes + size) <= sizeof(vio->mmio_large_read)) )
- {
- memcpy(&vio->mmio_large_read[bytes], p_data, size);
- vio->mmio_large_read_bytes += size;
- }
- }
- }
-
return X86EMUL_OKAY;
}
@@ -601,8 +551,11 @@ static int hvmemul_virtual_to_linear(
}
static int hvmemul_phys_mmio_access(
- paddr_t gpa, unsigned int size, uint8_t dir, uint8_t **buffer)
+ paddr_t gpa, unsigned int size, uint8_t dir, uint8_t *buffer,
+ unsigned int *off)
{
+ struct vcpu *curr = current;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
unsigned long one_rep = 1;
unsigned int chunk;
int rc;
@@ -621,14 +574,41 @@ static int hvmemul_phys_mmio_access(
for ( ;; )
{
- rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
- *buffer);
- if ( rc != X86EMUL_OKAY )
- break;
+ /* Have we already done this chunk? */
+ if ( (*off + chunk) <= vio->mmio_cache[dir].size )
+ {
+ ASSERT(*off + chunk <= vio->mmio_cache[dir].size);
+
+ if ( dir == IOREQ_READ )
+ memcpy(&buffer[*off],
+ &vio->mmio_cache[IOREQ_READ].buffer[*off],
+ chunk);
+ else
+ {
+ if ( memcmp(&buffer[*off],
+ &vio->mmio_cache[IOREQ_WRITE].buffer[*off],
+ chunk) != 0 )
+ domain_crash(curr->domain);
+ }
+ }
+ else
+ {
+ ASSERT(*off == vio->mmio_cache[dir].size);
+
+ rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
+ &buffer[*off]);
+ if ( rc != X86EMUL_OKAY )
+ break;
+
+ /* Note that we have now done this chunk */
+ memcpy(&vio->mmio_cache[dir].buffer[*off],
+ &buffer[*off], chunk);
+ vio->mmio_cache[dir].size += chunk;
+ }
/* Advance to the next chunk */
gpa += chunk;
- *buffer += chunk;
+ *off += chunk;
size -= chunk;
if ( size == 0 )
@@ -651,7 +631,7 @@ static int hvmemul_linear_mmio_access(
{
struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
unsigned long page_off = gla & (PAGE_SIZE - 1);
- unsigned int chunk;
+ unsigned int chunk, buffer_off = 0;
paddr_t gpa;
unsigned long one_rep = 1;
int rc;
@@ -670,7 +650,7 @@ static int hvmemul_linear_mmio_access(
for ( ;; )
{
- rc = hvmemul_phys_mmio_access(gpa, chunk, dir, &buffer);
+ rc = hvmemul_phys_mmio_access(gpa, chunk, dir, buffer, &buffer_off);
if ( rc != X86EMUL_OKAY )
break;
@@ -1625,7 +1605,7 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
rc = X86EMUL_RETRY;
if ( rc != X86EMUL_RETRY )
{
- vio->mmio_large_read_bytes = vio->mmio_large_write_bytes = 0;
+ memset(&vio->mmio_cache, 0, sizeof(vio->mmio_cache));
vio->mmio_insn_bytes = 0;
}
else
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 008c8fa..4f41c83 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -61,13 +61,15 @@ struct hvm_vcpu_io {
unsigned long mmio_gva;
unsigned long mmio_gpfn;
- /* We may read up to m256 as a number of device-model transactions. */
- paddr_t mmio_large_read_pa;
- uint8_t mmio_large_read[32];
- unsigned int mmio_large_read_bytes;
- /* We may write up to m256 as a number of device-model transactions. */
- unsigned int mmio_large_write_bytes;
- paddr_t mmio_large_write_pa;
+ /*
+ * We may read or write up to m256 as a number of device-model
+ * transactions.
+ */
+ struct {
+ unsigned long size;
+ uint8_t buffer[32];
+ } mmio_cache[2]; /* Indexed by ioreq type */
+
/* For retries we shouldn't re-fetch the instruction. */
unsigned int mmio_insn_bytes;
unsigned char mmio_insn[16];
--
1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel