Simply drop paged-out pages in guest_remove_page(), and notify xenpaging to
drop its reference to the gfn. If the ring is full, the page will
remain in paged-out state in xenpaging. This is not an issue, it just
means this gfn will not be nominated again.
This patch depends on an earlier patch for mem_event_check_ring(),
which adds an additional option to mem_event_check_ring().
Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
---
v3:
send one-way notification to pager to release page
use new mem_event_check_ring() feature to not pause vcpu when ring is full
v2:
resume dropped page to unpause vcpus
tools/xenpaging/xenpaging.c | 46 ++++++++++++++++++++++------------
xen/arch/x86/mm/p2m.c | 54 +++++++++++++++++++++++++++++++----------
xen/common/memory.c | 6 ++++
xen/include/asm-x86/p2m.h | 4 +++
xen/include/public/mem_event.h | 1
5 files changed, 83 insertions(+), 28 deletions(-)
--- xen-unstable.hg-4.1.22459.orig/tools/xenpaging/xenpaging.c
+++ xen-unstable.hg-4.1.22459/tools/xenpaging/xenpaging.c
@@ -386,6 +386,12 @@ int xenpaging_evict_page(xenpaging_t *pa
return ret;
}
+static void xenpaging_drop_page(xenpaging_t *paging, unsigned long gfn)
+{
+ /* Notify policy of page being dropped */
+ policy_notify_paged_in(paging->mem_event.domain_id, gfn);
+}
+
static int xenpaging_resume_page(xenpaging_t *paging, mem_event_response_t *rsp, int notify_policy)
{
int ret;
@@ -630,25 +636,33 @@ int main(int argc, char *argv[])
goto out;
}
- /* Populate the page */
- rc = xenpaging_populate_page(paging, &req.gfn, fd, i);
- if ( rc != 0 )
+ if ( req.flags & MEM_EVENT_FLAG_DROP_PAGE )
{
- ERROR("Error populating page");
- goto out;
+ DPRINTF("Dropping page %"PRIx64"\n", req.gfn);
+ xenpaging_drop_page(paging, req.gfn);
}
-
- /* Prepare the response */
- rsp.gfn = req.gfn;
- rsp.p2mt = req.p2mt;
- rsp.vcpu_id = req.vcpu_id;
- rsp.flags = req.flags;
-
- rc = xenpaging_resume_page(paging, &rsp, 1);
- if ( rc != 0 )
+ else
{
- ERROR("Error resuming page");
- goto out;
+ /* Populate the page */
+ rc = xenpaging_populate_page(paging, &req.gfn, fd, i);
+ if ( rc != 0 )
+ {
+ ERROR("Error populating page");
+ goto out;
+ }
+
+ /* Prepare the response */
+ rsp.gfn = req.gfn;
+ rsp.p2mt = req.p2mt;
+ rsp.vcpu_id = req.vcpu_id;
+ rsp.flags = req.flags;
+
+ rc = xenpaging_resume_page(paging, &rsp, 1);
+ if ( rc != 0 )
+ {
+ ERROR("Error resuming page");
+ goto out;
+ }
}
/* Evict a new page to replace the one we just paged in */
--- xen-unstable.hg-4.1.22459.orig/xen/arch/x86/mm/p2m.c
+++ xen-unstable.hg-4.1.22459/xen/arch/x86/mm/p2m.c
@@ -2194,12 +2194,15 @@ p2m_remove_page(struct p2m_domain *p2m,
P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);
- for ( i = 0; i < (1UL << page_order); i++ )
+ if ( mfn_valid(_mfn(mfn)) )
{
- mfn_return = p2m->get_entry(p2m, gfn + i, &t, p2m_query);
- if ( !p2m_is_grant(t) )
- set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
- ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
+ for ( i = 0; i < (1UL << page_order); i++ )
+ {
+ mfn_return = p2m->get_entry(p2m, gfn + i, &t, p2m_query);
+ if ( !p2m_is_grant(t) )
+ set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
+ ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
+ }
}
set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid);
}
@@ -2750,6 +2753,30 @@ int p2m_mem_paging_evict(struct p2m_doma
return 0;
}
+void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn)
+{
+ struct vcpu *v = current;
+ mem_event_request_t req;
+ struct domain *d = p2m->domain;
+
+ /* Check that there's space on the ring for this request */
+ if ( mem_event_check_ring(d, 0) )
+ {
+ /* This just means this gfn will not be paged again */
+ gdprintk(XENLOG_ERR, "dropped gfn %lx not released in xenpaging\n", gfn);
+ }
+ else
+ {
+ /* Send release notification to pager */
+ memset(&req, 0, sizeof(req));
+ req.flags |= MEM_EVENT_FLAG_DROP_PAGE;
+ req.gfn = gfn;
+ req.vcpu_id = v->vcpu_id;
+
+ mem_event_put_request(d, &req);
+ }
+}
+
void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
{
struct vcpu *v = current;
@@ -2823,13 +2850,16 @@ void p2m_mem_paging_resume(struct p2m_do
/* Pull the response off the ring */
mem_event_get_response(d, &rsp);
- /* Fix p2m entry */
- mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
- p2m_lock(p2m);
- set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw);
- set_gpfn_from_mfn(mfn_x(mfn), gfn);
- audit_p2m(p2m, 1);
- p2m_unlock(p2m);
+ /* Fix p2m entry if the page was not dropped */
+ if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
+ {
+ mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
+ p2m_lock(p2m);
+ set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw);
+ set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
+ audit_p2m(p2m, 1);
+ p2m_unlock(p2m);
+ }
/* Unpause domain */
if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
--- xen-unstable.hg-4.1.22459.orig/xen/common/memory.c
+++ xen-unstable.hg-4.1.22459/xen/common/memory.c
@@ -163,6 +163,12 @@ int guest_remove_page(struct domain *d,
#ifdef CONFIG_X86
mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(d), gmfn, &p2mt));
+ if ( unlikely(p2m_is_paging(p2mt)) )
+ {
+ guest_physmap_remove_page(d, gmfn, mfn, 0);
+ p2m_mem_paging_drop_page(p2m_get_hostp2m(d), gmfn);
+ return 1;
+ }
#else
mfn = gmfn_to_mfn(d, gmfn);
#endif
--- xen-unstable.hg-4.1.22459.orig/xen/include/asm-x86/p2m.h
+++ xen-unstable.hg-4.1.22459/xen/include/asm-x86/p2m.h
@@ -471,6 +471,8 @@ int set_shared_p2m_entry(struct p2m_doma
int p2m_mem_paging_nominate(struct p2m_domain *p2m, unsigned long gfn);
/* Evict a frame */
int p2m_mem_paging_evict(struct p2m_domain *p2m, unsigned long gfn);
+/* Tell xenpaging to drop a paged out frame */
+void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn);
/* Start populating a paged out frame */
void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn);
/* Prepare the p2m for paging a frame in */
@@ -478,6 +480,8 @@ int p2m_mem_paging_prep(struct p2m_domai
/* Resume normal operation (in case a domain was paused) */
void p2m_mem_paging_resume(struct p2m_domain *p2m);
#else
+static inline void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn)
+{ }
static inline void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
{ }
#endif
--- xen-unstable.hg-4.1.22459.orig/xen/include/public/mem_event.h
+++ xen-unstable.hg-4.1.22459/xen/include/public/mem_event.h
@@ -37,6 +37,7 @@
#define MEM_EVENT_FLAG_VCPU_PAUSED (1 << 0)
#define MEM_EVENT_FLAG_DOM_PAUSED (1 << 1)
#define MEM_EVENT_FLAG_OUT_OF_MEM (1 << 2)
+#define MEM_EVENT_FLAG_DROP_PAGE (1 << 3)
typedef struct mem_event_shared_page {
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|