[Xen-devel] [RFC PATCH V3 06/12] xen: migrate mem_event to vm_event
This is the second patch in the mem_event -> vm_event migration: it
converts all previous users of mem_event over to the new vm_event
system.
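
For consumers the conversion is mostly mechanical: the public header and the
type, reason and flag names gain a vm_event/VM_EVENT_ prefix in place of
mem_event/MEM_EVENT_, while the ring protocol itself is unchanged. A minimal
sketch of the renamed request/response handling, modelled on the xen-access.c
changes below (assumes the post-series <xen/vm_event.h>; the helper name is
illustrative only):

    #include <string.h>
    #include <xen/vm_event.h>   /* renamed from <xen/mem_event.h> */

    /* Illustrative helper: turn a request pulled off the ring into a
     * response, as xen-access.c does after this patch. */
    static int build_response(const vm_event_request_t *req,
                              vm_event_response_t *rsp)
    {
        /* was MEM_EVENT_INTERFACE_VERSION */
        if ( req->version != VM_EVENT_INTERFACE_VERSION )
            return -1;

        memset(rsp, 0, sizeof(*rsp));
        rsp->version = VM_EVENT_INTERFACE_VERSION;
        rsp->vcpu_id = req->vcpu_id;
        rsp->flags   = req->flags;   /* keeps VM_EVENT_FLAG_VCPU_PAUSED set */

        /* was MEM_EVENT_REASON_MEM_ACCESS */
        if ( req->reason == VM_EVENT_REASON_MEM_ACCESS )
            rsp->data.mem_access.gfn = req->data.mem_access.gfn;

        return 0;
    }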
Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
---
MAINTAINERS | 2 +-
tools/libxc/xc_mem_event.c | 8 +--
tools/libxc/xc_mem_paging.c | 18 +++---
tools/libxc/xc_memshr.c | 18 +++---
tools/libxc/xc_vm_event.c | 2 +-
tools/tests/xen-access/xen-access.c | 108 ++++++++++++++++-----------------
tools/xenpaging/pagein.c | 2 +-
tools/xenpaging/xenpaging.c | 118 ++++++++++++++++++------------------
tools/xenpaging/xenpaging.h | 8 +--
xen/arch/x86/domain.c | 2 +-
xen/arch/x86/domctl.c | 4 +-
xen/arch/x86/hvm/emulate.c | 6 +-
xen/arch/x86/hvm/hvm.c | 44 +++++++-------
xen/arch/x86/hvm/vmx/vmcs.c | 4 +-
xen/arch/x86/mm/hap/nested_ept.c | 4 +-
xen/arch/x86/mm/hap/nested_hap.c | 4 +-
xen/arch/x86/mm/mem_paging.c | 4 +-
xen/arch/x86/mm/mem_sharing.c | 30 ++++-----
xen/arch/x86/mm/p2m-pod.c | 4 +-
xen/arch/x86/mm/p2m-pt.c | 4 +-
xen/arch/x86/mm/p2m.c | 94 ++++++++++++++--------------
xen/arch/x86/x86_64/compat/mm.c | 6 +-
xen/arch/x86/x86_64/mm.c | 6 +-
xen/common/domain.c | 12 ++--
xen/common/domctl.c | 8 +--
xen/common/mem_access.c | 26 ++++----
xen/drivers/passthrough/pci.c | 2 +-
xen/include/asm-arm/p2m.h | 6 +-
xen/include/asm-x86/domain.h | 4 +-
xen/include/asm-x86/hvm/emulate.h | 2 +-
xen/include/asm-x86/p2m.h | 8 +--
xen/include/public/domctl.h | 3 +-
xen/include/xen/mem_access.h | 4 +-
xen/include/xen/p2m-common.h | 4 +-
xen/xsm/flask/hooks.c | 4 +-
35 files changed, 291 insertions(+), 292 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index cff3e5f..b86ab83 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -361,7 +361,7 @@ F: xen/arch/x86/mm/mem_sharing.c
F: xen/arch/x86/mm/mem_paging.c
F: tools/memshr
-MEMORY EVENT AND ACCESS
+VM EVENT AND ACCESS
M: Tim Deegan <tim@xxxxxxx>
S: Supported
F: xen/common/vm_event.c
diff --git a/tools/libxc/xc_mem_event.c b/tools/libxc/xc_mem_event.c
index a5e0948..319cc00 100644
--- a/tools/libxc/xc_mem_event.c
+++ b/tools/libxc/xc_mem_event.c
@@ -29,14 +29,14 @@ int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
DECLARE_DOMCTL;
int rc;
- domctl.cmd = XEN_DOMCTL_mem_event_op;
+ domctl.cmd = XEN_DOMCTL_vm_event_op;
domctl.domain = domain_id;
- domctl.u.mem_event_op.op = op;
- domctl.u.mem_event_op.mode = mode;
+ domctl.u.vm_event_op.op = op;
+ domctl.u.vm_event_op.mode = mode;
rc = do_domctl(xch, &domctl);
if ( !rc && port )
- *port = domctl.u.mem_event_op.port;
+ *port = domctl.u.vm_event_op.port;
return rc;
}
diff --git a/tools/libxc/xc_mem_paging.c b/tools/libxc/xc_mem_paging.c
index bf3173d..24cbbeb 100644
--- a/tools/libxc/xc_mem_paging.c
+++ b/tools/libxc/xc_mem_paging.c
@@ -46,19 +46,19 @@ int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
errno = EINVAL;
return -1;
}
-
- return xc_mem_event_control(xch, domain_id,
- XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE,
- XEN_DOMCTL_MEM_EVENT_OP_PAGING,
- port);
+
+ return xc_vm_event_control(xch, domain_id,
+ XEN_DOMCTL_VM_EVENT_OP_PAGING_ENABLE,
+ XEN_DOMCTL_VM_EVENT_OP_PAGING,
+ port);
}
int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id)
{
- return xc_mem_event_control(xch, domain_id,
- XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE,
- XEN_DOMCTL_MEM_EVENT_OP_PAGING,
- NULL);
+ return xc_vm_event_control(xch, domain_id,
+ XEN_DOMCTL_VM_EVENT_OP_PAGING_DISABLE,
+ XEN_DOMCTL_VM_EVENT_OP_PAGING,
+ NULL);
}
int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id, unsigned long gfn)
diff --git a/tools/libxc/xc_memshr.c b/tools/libxc/xc_memshr.c
index d6a9539..a7ec92e 100644
--- a/tools/libxc/xc_memshr.c
+++ b/tools/libxc/xc_memshr.c
@@ -51,20 +51,20 @@ int xc_memshr_ring_enable(xc_interface *xch,
errno = EINVAL;
return -1;
}
-
- return xc_mem_event_control(xch, domid,
- XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE,
- XEN_DOMCTL_MEM_EVENT_OP_SHARING,
- port);
+
+ return xc_vm_event_control(xch, domid,
+ XEN_DOMCTL_VM_EVENT_OP_SHARING_ENABLE,
+ XEN_DOMCTL_VM_EVENT_OP_SHARING,
+ port);
}
int xc_memshr_ring_disable(xc_interface *xch,
domid_t domid)
{
- return xc_mem_event_control(xch, domid,
- XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE,
- XEN_DOMCTL_MEM_EVENT_OP_SHARING,
- NULL);
+ return xc_vm_event_control(xch, domid,
+ XEN_DOMCTL_VM_EVENT_OP_SHARING_DISABLE,
+ XEN_DOMCTL_VM_EVENT_OP_SHARING,
+ NULL);
}
static int xc_memshr_memop(xc_interface *xch, domid_t domid,
diff --git a/tools/libxc/xc_vm_event.c b/tools/libxc/xc_vm_event.c
index dda766e..bd14c9d 100644
--- a/tools/libxc/xc_vm_event.c
+++ b/tools/libxc/xc_vm_event.c
@@ -29,7 +29,7 @@ int xc_vm_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
DECLARE_DOMCTL;
int rc;
- domctl.cmd = XEN_DOMCTL_mem_event_op;
+ domctl.cmd = XEN_DOMCTL_vm_event_op;
domctl.domain = domain_id;
domctl.u.vm_event_op.op = op;
domctl.u.vm_event_op.mode = mode;
diff --git a/tools/tests/xen-access/xen-access.c b/tools/tests/xen-access/xen-access.c
index 002dc3e..93b3ec3 100644
--- a/tools/tests/xen-access/xen-access.c
+++ b/tools/tests/xen-access/xen-access.c
@@ -39,7 +39,7 @@
#include <sys/poll.h>
#include <xenctrl.h>
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#define DPRINTF(a, b...) fprintf(stderr, a, ## b)
#define ERROR(a, b...) fprintf(stderr, a "\n", ## b)
@@ -91,26 +91,26 @@ static inline int spin_trylock(spinlock_t *lock)
return !test_and_set_bit(1, lock);
}
-#define mem_event_ring_lock_init(_m) spin_lock_init(&(_m)->ring_lock)
-#define mem_event_ring_lock(_m) spin_lock(&(_m)->ring_lock)
-#define mem_event_ring_unlock(_m) spin_unlock(&(_m)->ring_lock)
+#define vm_event_ring_lock_init(_m) spin_lock_init(&(_m)->ring_lock)
+#define vm_event_ring_lock(_m) spin_lock(&(_m)->ring_lock)
+#define vm_event_ring_unlock(_m) spin_unlock(&(_m)->ring_lock)
-typedef struct mem_event {
+typedef struct vm_event {
domid_t domain_id;
xc_evtchn *xce_handle;
int port;
- mem_event_back_ring_t back_ring;
+ vm_event_back_ring_t back_ring;
uint32_t evtchn_port;
void *ring_page;
spinlock_t ring_lock;
-} mem_event_t;
+} vm_event_t;
typedef struct xenaccess {
xc_interface *xc_handle;
xc_domaininfo_t *domain_info;
- mem_event_t mem_event;
+ vm_event_t vm_event;
} xenaccess_t;
static int interrupted;
@@ -170,13 +170,13 @@ int xenaccess_teardown(xc_interface *xch, xenaccess_t *xenaccess)
return 0;
/* Tear down domain xenaccess in Xen */
- if ( xenaccess->mem_event.ring_page )
- munmap(xenaccess->mem_event.ring_page, XC_PAGE_SIZE);
+ if ( xenaccess->vm_event.ring_page )
+ munmap(xenaccess->vm_event.ring_page, XC_PAGE_SIZE);
if ( mem_access_enable )
{
rc = xc_mem_access_disable(xenaccess->xc_handle,
- xenaccess->mem_event.domain_id);
+ xenaccess->vm_event.domain_id);
if ( rc != 0 )
{
ERROR("Error tearing down domain xenaccess in xen");
@@ -186,8 +186,8 @@ int xenaccess_teardown(xc_interface *xch, xenaccess_t *xenaccess)
/* Unbind VIRQ */
if ( evtchn_bind )
{
- rc = xc_evtchn_unbind(xenaccess->mem_event.xce_handle,
- xenaccess->mem_event.port);
+ rc = xc_evtchn_unbind(xenaccess->vm_event.xce_handle,
+ xenaccess->vm_event.port);
if ( rc != 0 )
{
ERROR("Error unbinding event port");
@@ -197,7 +197,7 @@ int xenaccess_teardown(xc_interface *xch, xenaccess_t *xenaccess)
/* Close event channel */
if ( evtchn_open )
{
- rc = xc_evtchn_close(xenaccess->mem_event.xce_handle);
+ rc = xc_evtchn_close(xenaccess->vm_event.xce_handle);
if ( rc != 0 )
{
ERROR("Error closing event channel");
@@ -239,17 +239,17 @@ xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t domain_id)
xenaccess->xc_handle = xch;
/* Set domain id */
- xenaccess->mem_event.domain_id = domain_id;
+ xenaccess->vm_event.domain_id = domain_id;
/* Initialise lock */
- mem_event_ring_lock_init(&xenaccess->mem_event);
+ vm_event_ring_lock_init(&xenaccess->vm_event);
/* Enable mem_access */
- xenaccess->mem_event.ring_page =
+ xenaccess->vm_event.ring_page =
xc_mem_access_enable(xenaccess->xc_handle,
- xenaccess->mem_event.domain_id,
- &xenaccess->mem_event.evtchn_port);
- if ( xenaccess->mem_event.ring_page == NULL )
+ xenaccess->vm_event.domain_id,
+ &xenaccess->vm_event.evtchn_port);
+ if ( xenaccess->vm_event.ring_page == NULL )
{
switch ( errno ) {
case EBUSY:
@@ -267,8 +267,8 @@ xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t domain_id)
mem_access_enable = 1;
/* Open event channel */
- xenaccess->mem_event.xce_handle = xc_evtchn_open(NULL, 0);
- if ( xenaccess->mem_event.xce_handle == NULL )
+ xenaccess->vm_event.xce_handle = xc_evtchn_open(NULL, 0);
+ if ( xenaccess->vm_event.xce_handle == NULL )
{
ERROR("Failed to open event channel");
goto err;
@@ -276,21 +276,21 @@ xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t domain_id)
evtchn_open = 1;
/* Bind event notification */
- rc = xc_evtchn_bind_interdomain(xenaccess->mem_event.xce_handle,
- xenaccess->mem_event.domain_id,
- xenaccess->mem_event.evtchn_port);
+ rc = xc_evtchn_bind_interdomain(xenaccess->vm_event.xce_handle,
+ xenaccess->vm_event.domain_id,
+ xenaccess->vm_event.evtchn_port);
if ( rc < 0 )
{
ERROR("Failed to bind event channel");
goto err;
}
evtchn_bind = 1;
- xenaccess->mem_event.port = rc;
+ xenaccess->vm_event.port = rc;
/* Initialise ring */
- SHARED_RING_INIT((mem_event_sring_t *)xenaccess->mem_event.ring_page);
- BACK_RING_INIT(&xenaccess->mem_event.back_ring,
- (mem_event_sring_t *)xenaccess->mem_event.ring_page,
+ SHARED_RING_INIT((vm_event_sring_t *)xenaccess->vm_event.ring_page);
+ BACK_RING_INIT(&xenaccess->vm_event.back_ring,
+ (vm_event_sring_t *)xenaccess->vm_event.ring_page,
XC_PAGE_SIZE);
/* Get domaininfo */
@@ -320,14 +320,14 @@ xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t domain_id)
return NULL;
}
-int get_request(mem_event_t *mem_event, mem_event_request_t *req)
+int get_request(vm_event_t *vm_event, vm_event_request_t *req)
{
- mem_event_back_ring_t *back_ring;
+ vm_event_back_ring_t *back_ring;
RING_IDX req_cons;
- mem_event_ring_lock(mem_event);
+ vm_event_ring_lock(vm_event);
- back_ring = &mem_event->back_ring;
+ back_ring = &vm_event->back_ring;
req_cons = back_ring->req_cons;
/* Copy request */
@@ -338,19 +338,19 @@ int get_request(mem_event_t *mem_event, mem_event_request_t *req)
back_ring->req_cons = req_cons;
back_ring->sring->req_event = req_cons + 1;
- mem_event_ring_unlock(mem_event);
+ vm_event_ring_unlock(vm_event);
return 0;
}
-static int put_response(mem_event_t *mem_event, mem_event_response_t *rsp)
+static int put_response(vm_event_t *vm_event, vm_event_response_t *rsp)
{
- mem_event_back_ring_t *back_ring;
+ vm_event_back_ring_t *back_ring;
RING_IDX rsp_prod;
- mem_event_ring_lock(mem_event);
+ vm_event_ring_lock(vm_event);
- back_ring = &mem_event->back_ring;
+ back_ring = &vm_event->back_ring;
rsp_prod = back_ring->rsp_prod_pvt;
/* Copy response */
@@ -361,24 +361,24 @@ static int put_response(mem_event_t *mem_event, mem_event_response_t *rsp)
back_ring->rsp_prod_pvt = rsp_prod;
RING_PUSH_RESPONSES(back_ring);
- mem_event_ring_unlock(mem_event);
+ vm_event_ring_unlock(vm_event);
return 0;
}
-static int xenaccess_resume_page(xenaccess_t *paging, mem_event_response_t *rsp)
+static int xenaccess_resume_page(xenaccess_t *paging, vm_event_response_t *rsp)
{
int ret;
/* Put the page info on the ring */
- ret = put_response(&paging->mem_event, rsp);
+ ret = put_response(&paging->vm_event, rsp);
if ( ret != 0 )
goto out;
/* Tell Xen page is ready */
- ret = xc_mem_access_resume(paging->xc_handle, paging->mem_event.domain_id);
- ret = xc_evtchn_notify(paging->mem_event.xce_handle,
- paging->mem_event.port);
+ ret = xc_mem_access_resume(paging->xc_handle, paging->vm_event.domain_id);
+ ret = xc_evtchn_notify(paging->vm_event.xce_handle,
+ paging->vm_event.port);
out:
return ret;
@@ -400,8 +400,8 @@ int main(int argc, char *argv[])
struct sigaction act;
domid_t domain_id;
xenaccess_t *xenaccess;
- mem_event_request_t req;
- mem_event_response_t rsp;
+ vm_event_request_t req;
+ vm_event_response_t rsp;
int rc = -1;
int rc1;
xc_interface *xch;
@@ -507,7 +507,7 @@ int main(int argc, char *argv[])
rc = xc_hvm_param_set(xch, domain_id, HVM_PARAM_MEMORY_EVENT_INT3,
HVMPME_mode_disabled);
if ( rc < 0 )
{
- ERROR("Error %d setting int3 mem_event\n", rc);
+ ERROR("Error %d setting int3 vm_event\n", rc);
goto exit;
}
@@ -527,7 +527,7 @@ int main(int argc, char *argv[])
shutting_down = 1;
}
- rc = xc_wait_for_event_or_timeout(xch, xenaccess->mem_event.xce_handle, 100);
+ rc = xc_wait_for_event_or_timeout(xch, xenaccess->vm_event.xce_handle, 100);
if ( rc < -1 )
{
ERROR("Error getting event");
@@ -539,11 +539,11 @@ int main(int argc, char *argv[])
DPRINTF("Got event from Xen\n");
}
- while ( RING_HAS_UNCONSUMED_REQUESTS(&xenaccess->mem_event.back_ring) )
+ while ( RING_HAS_UNCONSUMED_REQUESTS(&xenaccess->vm_event.back_ring) )
{
xenmem_access_t access;
- rc = get_request(&xenaccess->mem_event, &req);
+ rc = get_request(&xenaccess->vm_event, &req);
if ( rc != 0 )
{
ERROR("Error getting request");
@@ -551,7 +551,7 @@ int main(int argc, char *argv[])
continue;
}
- if ( req.version != MEM_EVENT_INTERFACE_VERSION )
+ if ( req.version != VM_EVENT_INTERFACE_VERSION )
{
ERROR("Error: mem_event interface version mismatch!\n");
interrupted = -1;
@@ -559,12 +559,12 @@ int main(int argc, char *argv[])
}
memset( &rsp, 0, sizeof (rsp) );
- rsp.version = MEM_EVENT_INTERFACE_VERSION;
+ rsp.version = VM_EVENT_INTERFACE_VERSION;
rsp.vcpu_id = req.vcpu_id;
rsp.flags = req.flags;
switch (req.reason) {
- case MEM_EVENT_REASON_MEM_ACCESS:
+ case VM_EVENT_REASON_MEM_ACCESS:
rc = xc_get_mem_access(xch, domain_id,
req.data.mem_access.gfn, &access);
if (rc < 0)
{
@@ -602,7 +602,7 @@ int main(int argc, char *argv[])
rsp.data.mem_access.gfn = req.data.mem_access.gfn;
break;
- case MEM_EVENT_REASON_SOFTWARE_BREAKPOINT:
+ case VM_EVENT_REASON_SOFTWARE_BREAKPOINT:
printf("INT3: rip=%016"PRIx64", gfn=%"PRIx64" (vcpu %d)\n",
req.regs.x86.rip,
req.data.software_breakpoint.gfn,
diff --git a/tools/xenpaging/pagein.c b/tools/xenpaging/pagein.c
index b3bcef7..7cb0f33 100644
--- a/tools/xenpaging/pagein.c
+++ b/tools/xenpaging/pagein.c
@@ -63,7 +63,7 @@ void page_in_trigger(void)
void create_page_in_thread(struct xenpaging *paging)
{
- page_in_args.dom = paging->mem_event.domain_id;
+ page_in_args.dom = paging->vm_event.domain_id;
page_in_args.pagein_queue = paging->pagein_queue;
page_in_args.xch = paging->xc_handle;
if (pthread_create(&page_in_thread, NULL, page_in, &page_in_args) == 0)
diff --git a/tools/xenpaging/xenpaging.c b/tools/xenpaging/xenpaging.c
index e5c5c76..c64ab18 100644
--- a/tools/xenpaging/xenpaging.c
+++ b/tools/xenpaging/xenpaging.c
@@ -63,7 +63,7 @@ static void close_handler(int sig)
static void xenpaging_mem_paging_flush_ioemu_cache(struct xenpaging *paging)
{
struct xs_handle *xsh = paging->xs_handle;
- domid_t domain_id = paging->mem_event.domain_id;
+ domid_t domain_id = paging->vm_event.domain_id;
char path[80];
sprintf(path, "/local/domain/0/device-model/%u/command", domain_id);
@@ -74,7 +74,7 @@ static void xenpaging_mem_paging_flush_ioemu_cache(struct xenpaging *paging)
static int xenpaging_wait_for_event_or_timeout(struct xenpaging *paging)
{
xc_interface *xch = paging->xc_handle;
- xc_evtchn *xce = paging->mem_event.xce_handle;
+ xc_evtchn *xce = paging->vm_event.xce_handle;
char **vec, *val;
unsigned int num;
struct pollfd fd[2];
@@ -111,7 +111,7 @@ static int xenpaging_wait_for_event_or_timeout(struct xenpaging *paging)
if ( strcmp(vec[XS_WATCH_TOKEN], watch_token) == 0 )
{
/* If our guest disappeared, set interrupt flag and fall through */
- if ( xs_is_domain_introduced(paging->xs_handle, paging->mem_event.domain_id) == false )
+ if ( xs_is_domain_introduced(paging->xs_handle, paging->vm_event.domain_id) == false )
{
xs_unwatch(paging->xs_handle, "@releaseDomain",
watch_token);
interrupted = SIGQUIT;
@@ -171,7 +171,7 @@ static int xenpaging_get_tot_pages(struct xenpaging *paging)
xc_domaininfo_t domain_info;
int rc;
- rc = xc_domain_getinfolist(xch, paging->mem_event.domain_id, 1, &domain_info);
+ rc = xc_domain_getinfolist(xch, paging->vm_event.domain_id, 1, &domain_info);
if ( rc != 1 )
{
PERROR("Error getting domain info");
@@ -231,7 +231,7 @@ static int xenpaging_getopts(struct xenpaging *paging, int argc, char *argv[])
{
switch(ch) {
case 'd':
- paging->mem_event.domain_id = atoi(optarg);
+ paging->vm_event.domain_id = atoi(optarg);
break;
case 'f':
filename = strdup(optarg);
@@ -264,7 +264,7 @@ static int xenpaging_getopts(struct xenpaging *paging, int argc, char *argv[])
}
/* Set domain id */
- if ( !paging->mem_event.domain_id )
+ if ( !paging->vm_event.domain_id )
{
printf("Numerical <domain_id> missing!\n");
return 1;
@@ -312,7 +312,7 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])
}
/* write domain ID to watch so we can ignore other domain shutdowns */
- snprintf(watch_token, sizeof(watch_token), "%u", paging->mem_event.domain_id);
+ snprintf(watch_token, sizeof(watch_token), "%u", paging->vm_event.domain_id);
if ( xs_watch(paging->xs_handle, "@releaseDomain", watch_token) == false )
{
PERROR("Could not bind to shutdown watch\n");
@@ -320,7 +320,7 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])
}
/* Watch xenpagings working target */
- dom_path = xs_get_domain_path(paging->xs_handle, paging->mem_event.domain_id);
+ dom_path = xs_get_domain_path(paging->xs_handle, paging->vm_event.domain_id);
if ( !dom_path )
{
PERROR("Could not find domain path\n");
@@ -339,17 +339,17 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])
}
/* Map the ring page */
- xc_get_hvm_param(xch, paging->mem_event.domain_id,
+ xc_get_hvm_param(xch, paging->vm_event.domain_id,
HVM_PARAM_PAGING_RING_PFN, &ring_pfn);
mmap_pfn = ring_pfn;
- paging->mem_event.ring_page =
- xc_map_foreign_batch(xch, paging->mem_event.domain_id,
+ paging->vm_event.ring_page =
+ xc_map_foreign_batch(xch, paging->vm_event.domain_id,
PROT_READ | PROT_WRITE, &mmap_pfn, 1);
if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
{
/* Map failed, populate ring page */
rc = xc_domain_populate_physmap_exact(paging->xc_handle,
- paging->mem_event.domain_id,
+ paging->vm_event.domain_id,
1, 0, 0, &ring_pfn);
if ( rc != 0 )
{
@@ -358,8 +358,8 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])
}
mmap_pfn = ring_pfn;
- paging->mem_event.ring_page =
- xc_map_foreign_batch(xch, paging->mem_event.domain_id,
+ paging->vm_event.ring_page =
+ xc_map_foreign_batch(xch, paging->vm_event.domain_id,
PROT_READ | PROT_WRITE, &mmap_pfn, 1);
if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
{
@@ -369,8 +369,8 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])
}
/* Initialise Xen */
- rc = xc_mem_paging_enable(xch, paging->mem_event.domain_id,
- &paging->mem_event.evtchn_port);
+ rc = xc_mem_paging_enable(xch, paging->vm_event.domain_id,
+ &paging->vm_event.evtchn_port);
if ( rc != 0 )
{
switch ( errno ) {
@@ -394,40 +394,40 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])
}
/* Open event channel */
- paging->mem_event.xce_handle = xc_evtchn_open(NULL, 0);
- if ( paging->mem_event.xce_handle == NULL )
+ paging->vm_event.xce_handle = xc_evtchn_open(NULL, 0);
+ if ( paging->vm_event.xce_handle == NULL )
{
PERROR("Failed to open event channel");
goto err;
}
/* Bind event notification */
- rc = xc_evtchn_bind_interdomain(paging->mem_event.xce_handle,
- paging->mem_event.domain_id,
- paging->mem_event.evtchn_port);
+ rc = xc_evtchn_bind_interdomain(paging->vm_event.xce_handle,
+ paging->vm_event.domain_id,
+ paging->vm_event.evtchn_port);
if ( rc < 0 )
{
PERROR("Failed to bind event channel");
goto err;
}
- paging->mem_event.port = rc;
+ paging->vm_event.port = rc;
/* Initialise ring */
- SHARED_RING_INIT((mem_event_sring_t *)paging->mem_event.ring_page);
- BACK_RING_INIT(&paging->mem_event.back_ring,
- (mem_event_sring_t *)paging->mem_event.ring_page,
+ SHARED_RING_INIT((vm_event_sring_t *)paging->vm_event.ring_page);
+ BACK_RING_INIT(&paging->vm_event.back_ring,
+ (vm_event_sring_t *)paging->vm_event.ring_page,
PAGE_SIZE);
/* Now that the ring is set, remove it from the guest's physmap */
if ( xc_domain_decrease_reservation_exact(xch,
- paging->mem_event.domain_id, 1, 0, &ring_pfn) )
+ paging->vm_event.domain_id, 1, 0, &ring_pfn) )
PERROR("Failed to remove ring from guest physmap");
/* Get max_pages from guest if not provided via cmdline */
if ( !paging->max_pages )
{
- rc = xc_domain_getinfolist(xch, paging->mem_event.domain_id, 1,
+ rc = xc_domain_getinfolist(xch, paging->vm_event.domain_id, 1,
&domain_info);
if ( rc != 1 )
{
@@ -497,9 +497,9 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])
free(paging->paging_buffer);
}
- if ( paging->mem_event.ring_page )
+ if ( paging->vm_event.ring_page )
{
- munmap(paging->mem_event.ring_page, PAGE_SIZE);
+ munmap(paging->vm_event.ring_page, PAGE_SIZE);
}
free(dom_path);
@@ -524,28 +524,28 @@ static void xenpaging_teardown(struct xenpaging *paging)
paging->xc_handle = NULL;
/* Tear down domain paging in Xen */
- munmap(paging->mem_event.ring_page, PAGE_SIZE);
- rc = xc_mem_paging_disable(xch, paging->mem_event.domain_id);
+ munmap(paging->vm_event.ring_page, PAGE_SIZE);
+ rc = xc_mem_paging_disable(xch, paging->vm_event.domain_id);
if ( rc != 0 )
{
PERROR("Error tearing down domain paging in xen");
}
/* Unbind VIRQ */
- rc = xc_evtchn_unbind(paging->mem_event.xce_handle, paging->mem_event.port);
+ rc = xc_evtchn_unbind(paging->vm_event.xce_handle, paging->vm_event.port);
if ( rc != 0 )
{
PERROR("Error unbinding event port");
}
- paging->mem_event.port = -1;
+ paging->vm_event.port = -1;
/* Close event channel */
- rc = xc_evtchn_close(paging->mem_event.xce_handle);
+ rc = xc_evtchn_close(paging->vm_event.xce_handle);
if ( rc != 0 )
{
PERROR("Error closing event channel");
}
- paging->mem_event.xce_handle = NULL;
+ paging->vm_event.xce_handle = NULL;
/* Close connection to xenstore */
xs_close(paging->xs_handle);
@@ -558,12 +558,12 @@ static void xenpaging_teardown(struct xenpaging *paging)
}
}
-static void get_request(struct mem_event *mem_event, mem_event_request_t *req)
+static void get_request(struct vm_event *vm_event, vm_event_request_t *req)
{
- mem_event_back_ring_t *back_ring;
+ vm_event_back_ring_t *back_ring;
RING_IDX req_cons;
- back_ring = &mem_event->back_ring;
+ back_ring = &vm_event->back_ring;
req_cons = back_ring->req_cons;
/* Copy request */
@@ -575,12 +575,12 @@ static void get_request(struct mem_event *mem_event, mem_event_request_t *req)
back_ring->sring->req_event = req_cons + 1;
}
-static void put_response(struct mem_event *mem_event, mem_event_response_t *rsp)
+static void put_response(struct vm_event *vm_event, vm_event_response_t *rsp)
{
- mem_event_back_ring_t *back_ring;
+ vm_event_back_ring_t *back_ring;
RING_IDX rsp_prod;
- back_ring = &mem_event->back_ring;
+ back_ring = &vm_event->back_ring;
rsp_prod = back_ring->rsp_prod_pvt;
/* Copy response */
@@ -607,7 +607,7 @@ static int xenpaging_evict_page(struct xenpaging *paging, unsigned long gfn, int
DECLARE_DOMCTL;
/* Nominate page */
- ret = xc_mem_paging_nominate(xch, paging->mem_event.domain_id, gfn);
+ ret = xc_mem_paging_nominate(xch, paging->vm_event.domain_id, gfn);
if ( ret < 0 )
{
/* unpageable gfn is indicated by EBUSY */
@@ -619,7 +619,7 @@ static int xenpaging_evict_page(struct xenpaging *paging, unsigned long gfn, int
}
/* Map page */
- page = xc_map_foreign_pages(xch, paging->mem_event.domain_id, PROT_READ, &victim, 1);
+ page = xc_map_foreign_pages(xch, paging->vm_event.domain_id, PROT_READ, &victim, 1);
if ( page == NULL )
{
PERROR("Error mapping page %lx", gfn);
@@ -641,7 +641,7 @@ static int xenpaging_evict_page(struct xenpaging *paging, unsigned long gfn, int
munmap(page, PAGE_SIZE);
/* Tell Xen to evict page */
- ret = xc_mem_paging_evict(xch, paging->mem_event.domain_id, gfn);
+ ret = xc_mem_paging_evict(xch, paging->vm_event.domain_id, gfn);
if ( ret < 0 )
{
/* A gfn in use is indicated by EBUSY */
@@ -671,10 +671,10 @@ static int xenpaging_evict_page(struct xenpaging *paging, unsigned long gfn, int
return ret;
}
-static int xenpaging_resume_page(struct xenpaging *paging, mem_event_response_t *rsp, int notify_policy)
+static int xenpaging_resume_page(struct xenpaging *paging, vm_event_response_t *rsp, int notify_policy)
{
/* Put the page info on the ring */
- put_response(&paging->mem_event, rsp);
+ put_response(&paging->vm_event, rsp);
/* Notify policy of page being paged in */
if ( notify_policy )
@@ -693,7 +693,7 @@ static int xenpaging_resume_page(struct xenpaging *paging, mem_event_response_t
}
/* Tell Xen page is ready */
- return xc_evtchn_notify(paging->mem_event.xce_handle, paging->mem_event.port);
+ return xc_evtchn_notify(paging->vm_event.xce_handle, paging->vm_event.port);
}
static int xenpaging_populate_page(struct xenpaging *paging, unsigned long gfn, int i)
@@ -715,7 +715,7 @@ static int xenpaging_populate_page(struct xenpaging *paging, unsigned long gfn,
do
{
/* Tell Xen to allocate a page for the domain */
- ret = xc_mem_paging_load(xch, paging->mem_event.domain_id, gfn, paging->paging_buffer);
+ ret = xc_mem_paging_load(xch, paging->vm_event.domain_id, gfn, paging->paging_buffer);
if ( ret < 0 )
{
if ( errno == ENOMEM )
@@ -857,8 +857,8 @@ int main(int argc, char *argv[])
{
struct sigaction act;
struct xenpaging *paging;
- mem_event_request_t req;
- mem_event_response_t rsp;
+ vm_event_request_t req;
+ vm_event_response_t rsp;
int num, prev_num = 0;
int slot;
int tot_pages;
@@ -874,7 +874,7 @@ int main(int argc, char *argv[])
}
xch = paging->xc_handle;
- DPRINTF("starting %s for domain_id %u with pagefile %s\n", argv[0],
paging->mem_event.domain_id, filename);
+ DPRINTF("starting %s for domain_id %u with pagefile %s\n", argv[0],
paging->vm_event.domain_id, filename);
/* ensure that if we get a signal, we'll do cleanup, then exit */
act.sa_handler = close_handler;
@@ -903,12 +903,12 @@ int main(int argc, char *argv[])
DPRINTF("Got event from Xen\n");
}
- while ( RING_HAS_UNCONSUMED_REQUESTS(&paging->mem_event.back_ring) )
+ while ( RING_HAS_UNCONSUMED_REQUESTS(&paging->vm_event.back_ring) )
{
/* Indicate possible error */
rc = 1;
- get_request(&paging->mem_event, &req);
+ get_request(&paging->vm_event, &req);
if ( req.data.mem_paging.gfn > paging->max_pages )
{
@@ -929,7 +929,7 @@ int main(int argc, char *argv[])
goto out;
}
- if ( req.flags & MEM_EVENT_FLAG_DROP_PAGE )
+ if ( req.flags & VM_EVENT_FLAG_DROP_PAGE )
{
DPRINTF("drop_page ^ gfn %"PRIx64" pageslot %d\n",
req.data.mem_paging.gfn, slot);
/* Notify policy of page being dropped */
@@ -966,13 +966,13 @@ int main(int argc, char *argv[])
{
DPRINTF("page %s populated (domain = %d; vcpu = %d;"
" gfn = %"PRIx64"; paused = %d; evict_fail = %d)\n",
- req.flags & MEM_EVENT_FLAG_EVICT_FAIL ? "not" :
"already",
- paging->mem_event.domain_id, req.vcpu_id,
req.data.mem_paging.gfn,
- !!(req.flags & MEM_EVENT_FLAG_VCPU_PAUSED) ,
- !!(req.flags & MEM_EVENT_FLAG_EVICT_FAIL) );
+ req.flags & VM_EVENT_FLAG_EVICT_FAIL ? "not" :
"already",
+ paging->vm_event.domain_id, req.vcpu_id,
req.data.mem_paging.gfn,
+ !!(req.flags & VM_EVENT_FLAG_VCPU_PAUSED) ,
+ !!(req.flags & VM_EVENT_FLAG_EVICT_FAIL) );
/* Tell Xen to resume the vcpu */
- if (( req.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) || ( req.flags
& MEM_EVENT_FLAG_EVICT_FAIL ))
+ if (( req.flags & VM_EVENT_FLAG_VCPU_PAUSED ) || ( req.flags &
VM_EVENT_FLAG_EVICT_FAIL ))
{
/* Prepare the response */
rsp.data.mem_paging.gfn = req.data.mem_paging.gfn;
diff --git a/tools/xenpaging/xenpaging.h b/tools/xenpaging/xenpaging.h
index 877db2f..25d511d 100644
--- a/tools/xenpaging/xenpaging.h
+++ b/tools/xenpaging/xenpaging.h
@@ -27,15 +27,15 @@
#include <xc_private.h>
#include <xen/event_channel.h>
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#define XENPAGING_PAGEIN_QUEUE_SIZE 64
-struct mem_event {
+struct vm_event {
domid_t domain_id;
xc_evtchn *xce_handle;
int port;
- mem_event_back_ring_t back_ring;
+ vm_event_back_ring_t back_ring;
uint32_t evtchn_port;
void *ring_page;
};
@@ -51,7 +51,7 @@ struct xenpaging {
void *paging_buffer;
- struct mem_event mem_event;
+ struct vm_event vm_event;
int fd;
/* number of pages for which data structures were allocated */
int max_pages;
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index c8832c6..9dc689d 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -432,7 +432,7 @@ int vcpu_initialise(struct vcpu *v)
v->arch.flags = TF_kernel_mode;
/* By default, do not emulate */
- v->arch.mem_event.emulate_flags = 0;
+ v->arch.vm_event.emulate_flags = 0;
rc = mapcache_vcpu_init(v);
if ( rc )
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 82365a4..3951ed3 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -30,8 +30,8 @@
#include <xen/hypercall.h> /* for arch_do_domctl */
#include <xsm/xsm.h>
#include <xen/iommu.h>
-#include <xen/mem_event.h>
-#include <public/mem_event.h>
+#include <xen/vm_event.h>
+#include <public/vm_event.h>
#include <asm/mem_sharing.h>
#include <asm/xstate.h>
#include <asm/debugger.h>
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 2ed4344..fa7175a 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -407,7 +407,7 @@ static int hvmemul_virtual_to_linear(
* The chosen maximum is very conservative but it's what we use in
* hvmemul_linear_to_phys() so there is no point in using a larger value.
* If introspection has been enabled for this domain, *reps should be
- * at most 1, since optimization might otherwise cause a single mem_event
+ * at most 1, since optimization might otherwise cause a single vm_event
* being triggered for repeated writes to a whole page.
*/
*reps = min_t(unsigned long, *reps,
@@ -1521,7 +1521,7 @@ int hvm_emulate_one_no_write(
return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops_no_write);
}
-void hvm_mem_event_emulate_one(bool_t nowrite, unsigned int trapnr,
+void hvm_vm_event_emulate_one(bool_t nowrite, unsigned int trapnr,
unsigned int errcode)
{
struct hvm_emulate_ctxt ctx = {{ 0 }};
@@ -1538,7 +1538,7 @@ void hvm_mem_event_emulate_one(bool_t nowrite, unsigned int trapnr,
{
case X86EMUL_RETRY:
/*
- * This function is called when handling an EPT-related mem_event
+ * This function is called when handling an EPT-related vm_event
* reply. As such, nothing else needs to be done here, since simply
* returning makes the current instruction cause a page fault again,
* consistent with X86EMUL_RETRY.
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 692b7d9..ce82447 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -35,7 +35,7 @@
#include <xen/paging.h>
#include <xen/cpu.h>
#include <xen/wait.h>
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#include <xen/mem_access.h>
#include <xen/rangeset.h>
#include <asm/shadow.h>
@@ -66,7 +66,7 @@
#include <public/hvm/ioreq.h>
#include <public/version.h>
#include <public/memory.h>
-#include <public/mem_event.h>
+#include <public/vm_event.h>
#include <public/arch-x86/cpuid.h>
bool_t __read_mostly hvm_enabled;
@@ -2717,7 +2717,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
struct p2m_domain *p2m;
int rc, fall_through = 0, paged = 0;
int sharing_enomem = 0;
- mem_event_request_t *req_ptr = NULL;
+ vm_event_request_t *req_ptr = NULL;
/* On Nested Virtualization, walk the guest page table.
* If this succeeds, all is fine.
@@ -2787,7 +2787,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
{
bool_t violation;
- /* If the access is against the permissions, then send to mem_event */
+ /* If the access is against the permissions, then send to vm_event */
switch (p2ma)
{
case p2m_access_n:
@@ -6261,7 +6261,7 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
return rc;
}
-static void hvm_mem_event_fill_regs(mem_event_request_t *req)
+static void hvm_mem_event_fill_regs(vm_event_request_t *req)
{
const struct cpu_user_regs *regs = guest_cpu_user_regs();
const struct vcpu *curr = current;
@@ -6293,7 +6293,7 @@ static void hvm_mem_event_fill_regs(mem_event_request_t *req)
req->regs.x86.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
}
-static int hvm_memory_event_traps(long parameters, mem_event_request_t *req)
+static int hvm_memory_event_traps(long parameters, vm_event_request_t *req)
{
int rc;
struct vcpu *v = current;
@@ -6302,7 +6302,7 @@ static int hvm_memory_event_traps(long parameters, mem_event_request_t *req)
if ( !(parameters & HVMPME_MODE_MASK) )
return 0;
- rc = mem_event_claim_slot(d, &d->mem_event->monitor);
+ rc = vm_event_claim_slot(d, &d->vm_event->monitor);
if ( rc == -ENOSYS )
{
/* If there was no ring to handle the event, then
@@ -6314,20 +6314,20 @@ static int hvm_memory_event_traps(long parameters, mem_event_request_t *req)
if ( (parameters & HVMPME_MODE_MASK) == HVMPME_mode_sync )
{
- req->flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
- mem_event_vcpu_pause(v);
+ req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
+ vm_event_vcpu_pause(v);
}
hvm_mem_event_fill_regs(req);
- mem_event_put_request(d, &d->mem_event->monitor, req);
+ vm_event_put_request(d, &d->vm_event->monitor, req);
return 1;
}
void hvm_memory_event_cr0(unsigned long value, unsigned long old)
{
- mem_event_request_t req = {
- .reason = MEM_EVENT_REASON_MOV_TO_CR0,
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_MOV_TO_CR0,
.vcpu_id = current->vcpu_id,
.data.mov_to_cr.new_value = value,
.data.mov_to_cr.old_value = old
@@ -6344,8 +6344,8 @@ void hvm_memory_event_cr0(unsigned long value, unsigned long old)
void hvm_memory_event_cr3(unsigned long value, unsigned long old)
{
- mem_event_request_t req = {
- .reason = MEM_EVENT_REASON_MOV_TO_CR3,
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_MOV_TO_CR3,
.vcpu_id = current->vcpu_id,
.data.mov_to_cr.new_value = value,
.data.mov_to_cr.old_value = old
@@ -6362,8 +6362,8 @@ void hvm_memory_event_cr3(unsigned long value, unsigned long old)
void hvm_memory_event_cr4(unsigned long value, unsigned long old)
{
- mem_event_request_t req = {
- .reason = MEM_EVENT_REASON_MOV_TO_CR4,
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_MOV_TO_CR4,
.vcpu_id = current->vcpu_id,
.data.mov_to_cr.new_value = value,
.data.mov_to_cr.old_value = old
@@ -6380,8 +6380,8 @@ void hvm_memory_event_cr4(unsigned long value, unsigned long old)
void hvm_memory_event_msr(unsigned long msr, unsigned long value)
{
- mem_event_request_t req = {
- .reason = MEM_EVENT_REASON_MOV_TO_MSR,
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_MOV_TO_MSR,
.vcpu_id = current->vcpu_id,
.data.mov_to_msr.msr = msr,
.data.mov_to_msr.value = value,
@@ -6395,8 +6395,8 @@ void hvm_memory_event_msr(unsigned long msr, unsigned long value)
int hvm_memory_event_int3(unsigned long gla)
{
uint32_t pfec = PFEC_page_present;
- mem_event_request_t req = {
- .reason = MEM_EVENT_REASON_SOFTWARE_BREAKPOINT,
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_SOFTWARE_BREAKPOINT,
.vcpu_id = current->vcpu_id,
.data.software_breakpoint.gfn = paging_gva_to_gfn(current, gla, &pfec)
};
@@ -6409,8 +6409,8 @@ int hvm_memory_event_int3(unsigned long gla)
int hvm_memory_event_single_step(unsigned long gla)
{
uint32_t pfec = PFEC_page_present;
- mem_event_request_t req = {
- .reason = MEM_EVENT_REASON_SINGLESTEP,
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_SINGLESTEP,
.vcpu_id = current->vcpu_id,
.data.singlestep.gfn = paging_gva_to_gfn(current, gla, &pfec)
};
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index e553fb0..0f2b2e6 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -25,7 +25,7 @@
#include <xen/event.h>
#include <xen/kernel.h>
#include <xen/keyhandler.h>
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
@@ -715,7 +715,7 @@ void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type)
return;
if ( unlikely(d->arch.hvm_domain.introspection_enabled) &&
- mem_event_check_ring(&d->mem_event->monitor) )
+ vm_event_check_ring(&d->vm_event->monitor) )
{
unsigned int i;
diff --git a/xen/arch/x86/mm/hap/nested_ept.c b/xen/arch/x86/mm/hap/nested_ept.c
index cbbc4e9..40adac3 100644
--- a/xen/arch/x86/mm/hap/nested_ept.c
+++ b/xen/arch/x86/mm/hap/nested_ept.c
@@ -17,9 +17,9 @@
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#include <xen/event.h>
-#include <public/mem_event.h>
+#include <public/vm_event.h>
#include <asm/domain.h>
#include <asm/page.h>
#include <asm/paging.h>
diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index 9c1ec11..cb28943 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -19,9 +19,9 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#include <xen/event.h>
-#include <public/mem_event.h>
+#include <public/vm_event.h>
#include <asm/domain.h>
#include <asm/page.h>
#include <asm/paging.h>
diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c
index e3d64a6..68b7fcc 100644
--- a/xen/arch/x86/mm/mem_paging.c
+++ b/xen/arch/x86/mm/mem_paging.c
@@ -22,12 +22,12 @@
#include <asm/p2m.h>
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
int mem_paging_memop(struct domain *d, xen_mem_paging_op_t *mpc)
{
- if ( unlikely(!d->mem_event->paging.ring_page) )
+ if ( unlikely(!d->vm_event->paging.ring_page) )
return -ENODEV;
switch( mpc->op )
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index b5149f7..c487207 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -28,7 +28,7 @@
#include <xen/grant_table.h>
#include <xen/sched.h>
#include <xen/rcupdate.h>
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#include <asm/page.h>
#include <asm/string.h>
#include <asm/p2m.h>
@@ -559,25 +559,25 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
{
struct vcpu *v = current;
int rc;
- mem_event_request_t req = {
- .reason = MEM_EVENT_REASON_MEM_SHARING,
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_MEM_SHARING,
.data.mem_sharing.gfn = gfn
};
- if ( (rc = __mem_event_claim_slot(d,
- &d->mem_event->share, allow_sleep)) < 0 )
+ if ( (rc = __vm_event_claim_slot(d,
+ &d->vm_event->share, allow_sleep)) < 0 )
return rc;
if ( v->domain == d )
{
- req.flags = MEM_EVENT_FLAG_VCPU_PAUSED;
- mem_event_vcpu_pause(v);
+ req.flags = VM_EVENT_FLAG_VCPU_PAUSED;
+ vm_event_vcpu_pause(v);
}
req.data.mem_sharing.p2mt = p2m_ram_shared;
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &d->mem_event->share, &req);
+ vm_event_put_request(d, &d->vm_event->share, &req);
return 0;
}
@@ -594,17 +594,17 @@ unsigned int mem_sharing_get_nr_shared_mfns(void)
int mem_sharing_sharing_resume(struct domain *d)
{
- mem_event_response_t rsp;
+ vm_event_response_t rsp;
/* Get all requests off the ring */
- while ( mem_event_get_response(d, &d->mem_event->share, &rsp) )
+ while ( vm_event_get_response(d, &d->vm_event->share, &rsp) )
{
struct vcpu *v;
- if ( rsp.version != MEM_EVENT_INTERFACE_VERSION )
+ if ( rsp.version != VM_EVENT_INTERFACE_VERSION )
continue;
- if ( rsp.flags & MEM_EVENT_FLAG_DUMMY )
+ if ( rsp.flags & VM_EVENT_FLAG_DUMMY )
continue;
/* Validate the vcpu_id in the response. */
@@ -614,8 +614,8 @@ int mem_sharing_sharing_resume(struct domain *d)
v = d->vcpu[rsp.vcpu_id];
/* Unpause domain/vcpu */
- if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
- mem_event_vcpu_unpause(v);
+ if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
+ vm_event_vcpu_unpause(v);
}
return 0;
@@ -1142,7 +1142,7 @@ err_out:
/* A note on the rationale for unshare error handling:
* 1. Unshare can only fail with ENOMEM. Any other error conditions BUG_ON()'s
- * 2. We notify a potential dom0 helper through a mem_event ring. But we
+ * 2. We notify a potential dom0 helper through a vm_event ring. But we
* allow the notification to not go to sleep. If the event ring is full
* of ENOMEM warnings, then it's on the ball.
* 3. We cannot go to sleep until the unshare is resolved, because we might
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 43f507c..0679f00 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -21,9 +21,9 @@
*/
#include <xen/iommu.h>
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#include <xen/event.h>
-#include <public/mem_event.h>
+#include <public/vm_event.h>
#include <asm/domain.h>
#include <asm/page.h>
#include <asm/paging.h>
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 26fb18d..e50b6fa 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -26,10 +26,10 @@
*/
#include <xen/iommu.h>
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#include <xen/event.h>
#include <xen/trace.h>
-#include <public/mem_event.h>
+#include <public/vm_event.h>
#include <asm/domain.h>
#include <asm/page.h>
#include <asm/paging.h>
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index ddadf66..c7a0bde 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -25,9 +25,9 @@
*/
#include <xen/iommu.h>
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#include <xen/event.h>
-#include <public/mem_event.h>
+#include <public/vm_event.h>
#include <asm/domain.h>
#include <asm/page.h>
#include <asm/paging.h>
@@ -1077,8 +1077,8 @@ int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
p2m_type_t p2mt)
{
- mem_event_request_t req = {
- .reason = MEM_EVENT_REASON_MEM_PAGING,
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_MEM_PAGING,
.data.mem_paging.gfn = gfn
};
@@ -1086,21 +1086,21 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
* correctness of the guest execution at this point. If this is the only
* page that happens to be paged-out, we'll be okay.. but it's likely the
* guest will crash shortly anyways. */
- int rc = mem_event_claim_slot(d, &d->mem_event->paging);
+ int rc = vm_event_claim_slot(d, &d->vm_event->paging);
if ( rc < 0 )
return;
/* Send release notification to pager */
- req.flags = MEM_EVENT_FLAG_DROP_PAGE;
+ req.flags = VM_EVENT_FLAG_DROP_PAGE;
/* Update stats unless the page hasn't yet been evicted */
if ( p2mt != p2m_ram_paging_out )
atomic_dec(&d->paged_pages);
else
/* Evict will fail now, tag this request for pager */
- req.flags |= MEM_EVENT_FLAG_EVICT_FAIL;
+ req.flags |= VM_EVENT_FLAG_EVICT_FAIL;
- mem_event_put_request(d, &d->mem_event->paging, &req);
+ vm_event_put_request(d, &d->vm_event->paging, &req);
}
/**
@@ -1127,8 +1127,8 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
{
struct vcpu *v = current;
- mem_event_request_t req = {
- .reason = MEM_EVENT_REASON_MEM_PAGING,
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_MEM_PAGING,
.data.mem_paging.gfn = gfn
};
p2m_type_t p2mt;
@@ -1137,7 +1137,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
struct p2m_domain *p2m = p2m_get_hostp2m(d);
/* We're paging. There should be a ring */
- int rc = mem_event_claim_slot(d, &d->mem_event->paging);
+ int rc = vm_event_claim_slot(d, &d->vm_event->paging);
if ( rc == -ENOSYS )
{
gdprintk(XENLOG_ERR, "Domain %hu paging gfn %lx yet no ring "
@@ -1159,7 +1159,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
{
/* Evict will fail now, tag this request for pager */
if ( p2mt == p2m_ram_paging_out )
- req.flags |= MEM_EVENT_FLAG_EVICT_FAIL;
+ req.flags |= VM_EVENT_FLAG_EVICT_FAIL;
p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
}
@@ -1168,14 +1168,14 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
/* Pause domain if request came from guest and gfn has paging type */
if ( p2m_is_paging(p2mt) && v->domain == d )
{
- mem_event_vcpu_pause(v);
- req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+ vm_event_vcpu_pause(v);
+ req.flags |= VM_EVENT_FLAG_VCPU_PAUSED;
}
/* No need to inform pager if the gfn is not in the page-out path */
else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
{
/* gfn is already on its way back and vcpu is not paused */
- mem_event_cancel_slot(d, &d->mem_event->paging);
+ vm_event_cancel_slot(d, &d->vm_event->paging);
return;
}
@@ -1183,7 +1183,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
req.data.mem_paging.p2mt = p2mt;
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &d->mem_event->paging, &req);
+ vm_event_put_request(d, &d->vm_event->paging, &req);
}
/**
@@ -1292,20 +1292,20 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer)
void p2m_mem_paging_resume(struct domain *d)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
- mem_event_response_t rsp;
+ vm_event_response_t rsp;
p2m_type_t p2mt;
p2m_access_t a;
mfn_t mfn;
/* Pull all responses off the ring */
- while( mem_event_get_response(d, &d->mem_event->paging, &rsp) )
+ while( vm_event_get_response(d, &d->vm_event->paging, &rsp) )
{
struct vcpu *v;
- if ( rsp.version != MEM_EVENT_INTERFACE_VERSION )
+ if ( rsp.version != VM_EVENT_INTERFACE_VERSION )
continue;
- if ( rsp.flags & MEM_EVENT_FLAG_DUMMY )
+ if ( rsp.flags & VM_EVENT_FLAG_DUMMY )
continue;
/* Validate the vcpu_id in the response. */
@@ -1315,7 +1315,7 @@ void p2m_mem_paging_resume(struct domain *d)
v = d->vcpu[rsp.vcpu_id];
/* Fix p2m entry if the page was not dropped */
- if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
+ if ( !(rsp.flags & VM_EVENT_FLAG_DROP_PAGE) )
{
gfn_lock(p2m, rsp.gfn, 0);
mfn = p2m->get_entry(p2m, rsp.data.mem_access.gfn, &p2mt, &a, 0,
NULL);
@@ -1331,12 +1331,12 @@ void p2m_mem_paging_resume(struct domain *d)
gfn_unlock(p2m, rsp.gfn, 0);
}
/* Unpause domain */
- if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
- mem_event_vcpu_unpause(v);
+ if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
+ vm_event_vcpu_unpause(v);
}
}
-static void p2m_mem_event_fill_regs(mem_event_request_t *req)
+static void p2m_vm_event_fill_regs(vm_event_request_t *req)
{
const struct cpu_user_regs *regs = guest_cpu_user_regs();
struct segment_register seg;
@@ -1391,10 +1391,10 @@ static void p2m_mem_event_fill_regs(mem_event_request_t *req)
req->regs.x86.cs_arbytes = seg.attr.bytes;
}
-void p2m_mem_event_emulate_check(struct vcpu *v, const mem_event_response_t *rsp)
+void p2m_vm_event_emulate_check(struct vcpu *v, const vm_event_response_t *rsp)
{
/* Mark vcpu for skipping one instruction upon rescheduling. */
- if ( rsp->flags & MEM_EVENT_FLAG_EMULATE )
+ if ( rsp->flags & VM_EVENT_FLAG_EMULATE )
{
xenmem_access_t access;
bool_t violation = 1;
@@ -1441,7 +1441,7 @@ void p2m_mem_event_emulate_check(struct vcpu *v, const mem_event_response_t *rsp
}
}
- v->arch.mem_event.emulate_flags = violation ? rsp->flags : 0;
+ v->arch.vm_event.emulate_flags = violation ? rsp->flags : 0;
}
}
@@ -1456,7 +1456,7 @@ void p2m_setup_introspection(struct domain *d)
bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
struct npfec npfec,
- mem_event_request_t **req_ptr)
+ vm_event_request_t **req_ptr)
{
struct vcpu *v = current;
unsigned long gfn = gpa >> PAGE_SHIFT;
@@ -1465,7 +1465,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
mfn_t mfn;
p2m_type_t p2mt;
p2m_access_t p2ma;
- mem_event_request_t *req;
+ vm_event_request_t *req;
int rc;
unsigned long eip = guest_cpu_user_regs()->eip;
@@ -1492,13 +1492,13 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
gfn_unlock(p2m, gfn, 0);
/* Otherwise, check if there is a memory event listener, and send the message along */
- if ( !mem_event_check_ring(&d->mem_event->monitor) || !req_ptr )
+ if ( !vm_event_check_ring(&d->vm_event->monitor) || !req_ptr )
{
/* No listener */
if ( p2m->access_required )
{
gdprintk(XENLOG_INFO, "Memory access permissions failure, "
- "no mem_event listener VCPU %d, dom %d\n",
+ "no vm_event listener VCPU %d, dom %d\n",
v->vcpu_id, d->domain_id);
domain_crash(v->domain);
return 0;
@@ -1521,40 +1521,40 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
}
}
- /* The previous mem_event reply does not match the current state. */
- if ( v->arch.mem_event.gpa != gpa || v->arch.mem_event.eip != eip )
+ /* The previous vm_event reply does not match the current state. */
+ if ( v->arch.vm_event.gpa != gpa || v->arch.vm_event.eip != eip )
{
- /* Don't emulate the current instruction, send a new mem_event. */
- v->arch.mem_event.emulate_flags = 0;
+ /* Don't emulate the current instruction, send a new vm_event. */
+ v->arch.vm_event.emulate_flags = 0;
/*
* Make sure to mark the current state to match it again against
- * the new mem_event about to be sent.
+ * the new vm_event about to be sent.
*/
- v->arch.mem_event.gpa = gpa;
- v->arch.mem_event.eip = eip;
+ v->arch.vm_event.gpa = gpa;
+ v->arch.vm_event.eip = eip;
}
- if ( v->arch.mem_event.emulate_flags )
+ if ( v->arch.vm_event.emulate_flags )
{
- hvm_mem_event_emulate_one((v->arch.mem_event.emulate_flags &
- MEM_EVENT_FLAG_EMULATE_NOWRITE) != 0,
+ hvm_vm_event_emulate_one((v->arch.vm_event.emulate_flags &
+ VM_EVENT_FLAG_EMULATE_NOWRITE) != 0,
TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
- v->arch.mem_event.emulate_flags = 0;
+ v->arch.vm_event.emulate_flags = 0;
return 1;
}
*req_ptr = NULL;
- req = xzalloc(mem_event_request_t);
+ req = xzalloc(vm_event_request_t);
if ( req )
{
*req_ptr = req;
- req->reason = MEM_EVENT_REASON_MEM_ACCESS;
+ req->reason = VM_EVENT_REASON_MEM_ACCESS;
/* Pause the current VCPU */
if ( p2ma != p2m_access_n2rwx )
- req->flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+ req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
/* Send request to mem event */
req->data.mem_access.gfn = gfn;
@@ -1570,12 +1570,12 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
req->data.mem_access.access_x = npfec.insn_fetch;
req->vcpu_id = v->vcpu_id;
- p2m_mem_event_fill_regs(req);
+ p2m_vm_event_fill_regs(req);
}
/* Pause the current VCPU */
if ( p2ma != p2m_access_n2rwx )
- mem_event_vcpu_pause(v);
+ vm_event_vcpu_pause(v);
/* VCPU may be paused, return whether we promoted automatically */
return (p2ma == p2m_access_n2rwx);
diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
index c8ea85a..959ccf5 100644
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -1,5 +1,5 @@
#include <xen/event.h>
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#include <xen/mem_access.h>
#include <xen/multicall.h>
#include <compat/memory.h>
@@ -191,7 +191,7 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
xen_mem_paging_op_t mpo;
if ( copy_from_guest(&mpo, arg, 1) )
return -EFAULT;
- rc = do_mem_event_op(cmd, mpo.domain, &mpo);
+ rc = do_vm_event_op(cmd, mpo.domain, &mpo);
if ( !rc && __copy_to_guest(arg, &mpo, 1) )
return -EFAULT;
break;
@@ -204,7 +204,7 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
return -EFAULT;
if ( mso.op == XENMEM_sharing_op_audit )
return mem_sharing_audit();
- rc = do_mem_event_op(cmd, mso.domain, &mso);
+ rc = do_vm_event_op(cmd, mso.domain, &mso);
if ( !rc && __copy_to_guest(arg, &mso, 1) )
return -EFAULT;
break;
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 2fa1f67..1e2bd1a 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -26,7 +26,7 @@
#include <xen/nodemask.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#include <xen/mem_access.h>
#include <asm/current.h>
#include <asm/asm_defns.h>
@@ -988,7 +988,7 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
xen_mem_paging_op_t mpo;
if ( copy_from_guest(&mpo, arg, 1) )
return -EFAULT;
- rc = do_mem_event_op(cmd, mpo.domain, &mpo);
+ rc = do_vm_event_op(cmd, mpo.domain, &mpo);
if ( !rc && __copy_to_guest(arg, &mpo, 1) )
return -EFAULT;
break;
@@ -1001,7 +1001,7 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
return -EFAULT;
if ( mso.op == XENMEM_sharing_op_audit )
return mem_sharing_audit();
- rc = do_mem_event_op(cmd, mso.domain, &mso);
+ rc = do_vm_event_op(cmd, mso.domain, &mso);
if ( !rc && __copy_to_guest(arg, &mso, 1) )
return -EFAULT;
break;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index e02823e..e54ef2d 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -15,7 +15,7 @@
#include <xen/domain.h>
#include <xen/mm.h>
#include <xen/event.h>
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#include <xen/time.h>
#include <xen/console.h>
#include <xen/softirq.h>
@@ -344,8 +344,8 @@ struct domain *domain_create(
poolid = 0;
err = -ENOMEM;
- d->mem_event = xzalloc(struct mem_event_per_domain);
- if ( !d->mem_event )
+ d->vm_event = xzalloc(struct vm_event_per_domain);
+ if ( !d->vm_event )
goto fail;
d->pbuf = xzalloc_array(char, DOMAIN_PBUF_SIZE);
@@ -387,7 +387,7 @@ struct domain *domain_create(
if ( hardware_domain == d )
hardware_domain = old_hwdom;
atomic_set(&d->refcnt, DOMAIN_DESTROYED);
- xfree(d->mem_event);
+ xfree(d->vm_event);
xfree(d->pbuf);
if ( init_status & INIT_arch )
arch_domain_destroy(d);
@@ -629,7 +629,7 @@ int domain_kill(struct domain *d)
d->is_dying = DOMDYING_dead;
/* Mem event cleanup has to go here because the rings
* have to be put before we call put_domain. */
- mem_event_cleanup(d);
+ vm_event_cleanup(d);
put_domain(d);
send_global_virq(VIRQ_DOM_EXC);
/* fallthrough */
@@ -808,7 +808,7 @@ static void complete_domain_destroy(struct rcu_head *head)
free_xenoprof_pages(d);
#endif
- xfree(d->mem_event);
+ xfree(d->vm_event);
xfree(d->pbuf);
for ( i = d->max_vcpus - 1; i >= 0; i-- )
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index ee578c0..44d45d1 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -24,7 +24,7 @@
#include <xen/bitmap.h>
#include <xen/paging.h>
#include <xen/hypercall.h>
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#include <asm/current.h>
#include <asm/irq.h>
#include <asm/page.h>
@@ -1115,9 +1115,9 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
}
break;
- case XEN_DOMCTL_mem_event_op:
- ret = mem_event_domctl(d, &op->u.mem_event_op,
- guest_handle_cast(u_domctl, void));
+ case XEN_DOMCTL_vm_event_op:
+ ret = vm_event_domctl(d, &op->u.vm_event_op,
+ guest_handle_cast(u_domctl, void));
copyback = 1;
break;
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 47574b3..1f655b0 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -24,24 +24,24 @@
#include <xen/sched.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
-#include <xen/mem_event.h>
+#include <xen/vm_event.h>
#include <public/memory.h>
#include <asm/p2m.h>
#include <xsm/xsm.h>
void mem_access_resume(struct domain *d)
{
- mem_event_response_t rsp;
+ vm_event_response_t rsp;
/* Pull all responses off the ring. */
- while ( mem_event_get_response(d, &d->mem_event->monitor, &rsp) )
+ while ( vm_event_get_response(d, &d->vm_event->monitor, &rsp) )
{
struct vcpu *v;
- if ( rsp.version != MEM_EVENT_INTERFACE_VERSION )
+ if ( rsp.version != VM_EVENT_INTERFACE_VERSION )
continue;
- if ( rsp.flags & MEM_EVENT_FLAG_DUMMY )
+ if ( rsp.flags & VM_EVENT_FLAG_DUMMY )
continue;
/* Validate the vcpu_id in the response. */
@@ -50,11 +50,11 @@ void mem_access_resume(struct domain *d)
v = d->vcpu[rsp.vcpu_id];
- p2m_mem_event_emulate_check(v, &rsp);
+ p2m_vm_event_emulate_check(v, &rsp);
/* Unpause domain. */
- if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
- mem_event_vcpu_unpause(v);
+ if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
+ vm_event_vcpu_unpause(v);
}
}
@@ -77,12 +77,12 @@ int mem_access_memop(unsigned long cmd,
if ( !p2m_mem_access_sanity_check(d) )
goto out;
- rc = xsm_mem_event_op(XSM_DM_PRIV, d, XENMEM_access_op);
+ rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_access_op);
if ( rc )
goto out;
rc = -ENODEV;
- if ( unlikely(!d->mem_event->monitor.ring_page) )
+ if ( unlikely(!d->vm_event->monitor.ring_page) )
goto out;
switch ( mao.op )
@@ -147,13 +147,13 @@ int mem_access_memop(unsigned long cmd,
return rc;
}
-int mem_access_send_req(struct domain *d, mem_event_request_t *req)
+int mem_access_send_req(struct domain *d, vm_event_request_t *req)
{
- int rc = mem_event_claim_slot(d, &d->mem_event->monitor);
+ int rc = vm_event_claim_slot(d, &d->vm_event->monitor);
if ( rc < 0 )
return rc;
- mem_event_put_request(d, &d->mem_event->monitor, req);
+ vm_event_put_request(d, &d->vm_event->monitor, req);
return 0;
}
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 78c6977..964384b 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1346,7 +1346,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
* enabled for this domain */
if ( unlikely(!need_iommu(d) &&
(d->arch.hvm_domain.mem_sharing_enabled ||
- d->mem_event->paging.ring_page ||
+ d->vm_event->paging.ring_page ||
p2m_get_hostp2m(d)->global_logdirty)) )
return -EXDEV;
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index da36504..21a8d71 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -45,7 +45,7 @@ struct p2m_domain {
unsigned long shattered[4];
} stats;
- /* If true, and an access fault comes in and there is no mem_event listener,
+ /* If true, and an access fault comes in and there is no vm_event listener,
* pause domain. Otherwise, remove access restrictions. */
bool_t access_required;
};
@@ -71,8 +71,8 @@ typedef enum {
} p2m_type_t;
static inline
-void p2m_mem_event_emulate_check(struct vcpu *v,
- const mem_event_response_t *rsp)
+void p2m_vm_event_emulate_check(struct vcpu *v,
+ const vm_event_response_t *rsp)
{
/* Not supported on ARM. */
};
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 6a77a93..20ede1e 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -478,13 +478,13 @@ struct arch_vcpu
/*
* Should we emulate the next matching instruction on VCPU resume
- * after a mem_event?
+ * after a vm_event?
*/
struct {
uint32_t emulate_flags;
unsigned long gpa;
unsigned long eip;
- } mem_event;
+ } vm_event;
} __cacheline_aligned;
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index 5411302..b726654 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -38,7 +38,7 @@ int hvm_emulate_one(
struct hvm_emulate_ctxt *hvmemul_ctxt);
int hvm_emulate_one_no_write(
struct hvm_emulate_ctxt *hvmemul_ctxt);
-void hvm_mem_event_emulate_one(bool_t nowrite,
+void hvm_vm_event_emulate_one(bool_t nowrite,
unsigned int trapnr,
unsigned int errcode);
void hvm_emulate_prepare(
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index b96f24c..b40890a 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -245,7 +245,7 @@ struct p2m_domain {
* retyped get this access type. See definition of p2m_access_t. */
p2m_access_t default_access;
- /* If true, and an access fault comes in and there is no mem_event listener,
+ /* If true, and an access fault comes in and there is no vm_event listener,
* pause domain. Otherwise, remove access restrictions. */
bool_t access_required;
@@ -579,7 +579,7 @@ void p2m_mem_paging_resume(struct domain *d);
* locks -- caller must also xfree the request. */
bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
struct npfec npfec,
- mem_event_request_t **req_ptr);
+ vm_event_request_t **req_ptr);
/* Set access type for a region of pfns.
* If start_pfn == -1ul, sets the default access type */
@@ -593,8 +593,8 @@ int p2m_get_mem_access(struct domain *d, unsigned long pfn,
/* Check for emulation and mark vcpu for skipping one instruction
* upon rescheduling if required. */
-void p2m_mem_event_emulate_check(struct vcpu *v,
- const mem_event_response_t *rsp);
+void p2m_vm_event_emulate_check(struct vcpu *v,
+ const vm_event_response_t *rsp);
/* Enable arch specific introspection options (such as MSR interception). */
void p2m_setup_introspection(struct domain *d);
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index a7d3e94..6396f4c 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -1133,7 +1133,7 @@ struct xen_domctl {
#define XEN_DOMCTL_suppress_spurious_page_faults 53
#define XEN_DOMCTL_debug_op 54
#define XEN_DOMCTL_gethvmcontext_partial 55
-#define XEN_DOMCTL_mem_event_op 56
+#define XEN_DOMCTL_vm_event_op 56
#define XEN_DOMCTL_mem_sharing_op 57
#define XEN_DOMCTL_disable_migrate 58
#define XEN_DOMCTL_gettscinfo 59
@@ -1202,7 +1202,6 @@ struct xen_domctl {
struct xen_domctl_subscribe subscribe;
struct xen_domctl_debug_op debug_op;
struct xen_domctl_vm_event_op vm_event_op;
- struct xen_domctl_mem_event_op mem_event_op;
struct xen_domctl_mem_sharing_op mem_sharing_op;
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_cpuid cpuid;
diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
index 6ceb2a4..1d01221 100644
--- a/xen/include/xen/mem_access.h
+++ b/xen/include/xen/mem_access.h
@@ -29,7 +29,7 @@
int mem_access_memop(unsigned long cmd,
XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
-int mem_access_send_req(struct domain *d, mem_event_request_t *req);
+int mem_access_send_req(struct domain *d, vm_event_request_t *req);
/* Resumes the running of the VCPU, restarting the last instruction */
void mem_access_resume(struct domain *d);
@@ -44,7 +44,7 @@ int mem_access_memop(unsigned long cmd,
}
static inline
-int mem_access_send_req(struct domain *d, mem_event_request_t *req)
+int mem_access_send_req(struct domain *d, vm_event_request_t *req)
{
return -ENOSYS;
}
diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h
index 29f3628..5da8a2d 100644
--- a/xen/include/xen/p2m-common.h
+++ b/xen/include/xen/p2m-common.h
@@ -1,12 +1,12 @@
#ifndef _XEN_P2M_COMMON_H
#define _XEN_P2M_COMMON_H
-#include <public/mem_event.h>
+#include <public/vm_event.h>
/*
* Additional access types, which are used to further restrict
* the permissions given my the p2m_type_t memory type. Violations
- * caused by p2m_access_t restrictions are sent to the mem_event
+ * caused by p2m_access_t restrictions are sent to the vm_event
* interface.
*
* The access permissions are soft state: when any ambiguous change of page
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 05ebf03..1debd31 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -578,7 +578,7 @@ static int flask_domctl(struct domain *d, int cmd)
case XEN_DOMCTL_memory_mapping:
case XEN_DOMCTL_set_target:
#ifdef HAS_MEM_ACCESS
- case XEN_DOMCTL_mem_event_op:
+ case XEN_DOMCTL_vm_event_op:
#endif
#ifdef CONFIG_X86
/* These have individual XSM hooks (arch/x86/domctl.c) */
@@ -689,7 +689,7 @@ static int flask_domctl(struct domain *d, int cmd)
return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__TRIGGER);
case XEN_DOMCTL_set_access_required:
- return current_has_perm(d, SECCLASS_HVM, HVM__MEM_EVENT);
+ return current_has_perm(d, SECCLASS_HVM, HVM__VM_EVENT);
case XEN_DOMCTL_debug_op:
case XEN_DOMCTL_gdbsx_guestmemio:
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel