# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1261031276 0
# Node ID 98bd49474dbf9a4a1d958d30bd947f1258b5246b
# Parent 46754a9f0be1bc2eddb16af5124ff5bb30b93e41
Foreign mappings need to check whether the underlying pages are sharable/shared.
If so, only RO mappings are allowed to go ahead. If an RW mapping of a
sharable/shared page is requested, the GFN will be unshared (if there are free
pages for private copies); otherwise an error is returned. Note that all tools
(libxc + backends) which create foreign mappings need to check the return
values for errors (see the sketch after the diffstat below).
Signed-off-by: Grzegorz Milos <Grzegorz.Milos@xxxxxxxxxx>
---
xen/arch/x86/mm.c | 33 +++++++++++++++++++++++++++++++++
xen/common/grant_table.c | 27 +++++++++++++++++++--------
2 files changed, 52 insertions(+), 8 deletions(-)
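The tool-side consequence of this change is that a foreign mapping request can
now fail outright. Below is a minimal sketch of the check backends should make,
assuming the 2009-era libxc interface; the xc_map_foreign_range() call, the
XC_PAGE_SIZE constant and the handle/domid/pfn parameters are assumptions for
illustration and are not part of this patch:

/* Sketch: tool-side check of a foreign mapping's return value.
 * The parameters and the libxc call used here are assumptions for
 * illustration, not part of this changeset. */
#include <stdio.h>
#include <sys/mman.h>
#include <xenctrl.h>

static void *map_foreign_page_checked(int xc_handle, uint32_t domid,
                                      unsigned long pfn)
{
    void *va = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
                                    PROT_READ | PROT_WRITE, pfn);
    if ( va == NULL )
    {
        /* With this changeset an RW mapping of a shared page fails when
         * the page cannot be unshared (e.g. no memory for a private
         * copy), so the error must be handled rather than assumed away. */
        fprintf(stderr, "foreign map of pfn %#lx in dom%u failed\n",
                pfn, (unsigned int)domid);
        return NULL;
    }
    return va;
}

A backend that previously assumed the mapping always succeeded should treat a
NULL return as a recoverable error (retry or report it), since the hypervisor
may simply have no free page for the private copy.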
diff -r 46754a9f0be1 -r 98bd49474dbf xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/arch/x86/mm.c Thu Dec 17 06:27:56 2009 +0000
@@ -3147,6 +3147,20 @@ int do_mmu_update(
rc = -ENOENT;
break;
}
+ /* XXX: Ugly: pull all the checks into a separate function.
+ * Don't want to do it now, not to interfere with mem_paging
+ * patches */
+ else if ( p2m_ram_shared == l1e_p2mt )
+ {
+ /* Unshare the page for RW foreign mappings */
+ if(l1e_get_flags(l1e) & _PAGE_RW)
+ {
+ rc = mem_sharing_unshare_page(pg_owner,
+ l1e_get_pfn(l1e),
+ 0);
+ if(rc) break;
+ }
+ }
okay = mod_l1_entry(va, l1e, mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v,
@@ -3171,6 +3185,13 @@ int do_mmu_update(
rc = -ENOENT;
break;
}
+ else if ( p2m_ram_shared == l2e_p2mt )
+ {
+ MEM_LOG("Unexpected attempt to map shared page.\n");
+ rc = -EINVAL;
+ break;
+ }
+
okay = mod_l2_entry(va, l2e, mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
@@ -3192,6 +3213,12 @@ int do_mmu_update(
else if ( p2m_ram_paging_in_start == l3e_p2mt )
{
rc = -ENOENT;
+ break;
+ }
+ else if ( p2m_ram_shared == l3e_p2mt )
+ {
+ MEM_LOG("Unexpected attempt to map shared page.\n");
+ rc = -EINVAL;
break;
}
@@ -3217,6 +3244,12 @@ int do_mmu_update(
else if ( p2m_ram_paging_in_start == l4e_p2mt )
{
rc = -ENOENT;
+ break;
+ }
+ else if ( p2m_ram_shared == l4e_p2mt )
+ {
+ MEM_LOG("Unexpected attempt to map shared page.\n");
+ rc = -EINVAL;
break;
}
diff -r 46754a9f0be1 -r 98bd49474dbf xen/common/grant_table.c
--- a/xen/common/grant_table.c Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/common/grant_table.c Thu Dec 17 06:27:56 2009 +0000
@@ -105,6 +105,8 @@ static unsigned inline int max_nr_maptra
return (max_nr_grant_frames * MAX_MAPTRACK_TO_GRANTS_RATIO);
}
+#define gfn_to_mfn_private(_d, _gfn, _p2mt) \
+ mfn_x(gfn_to_mfn_unshare(_d, _gfn, _p2mt, 1))
#define SHGNT_PER_PAGE_V1 (PAGE_SIZE / sizeof(grant_entry_v1_t))
#define shared_entry_v1(t, e) \
@@ -493,12 +495,16 @@ __gnttab_map_grant_ref(
if ( !act->pin )
{
+ p2m_type_t p2mt;
+
act->domid = ld->domain_id;
if ( sha1 )
act->gfn = sha1->frame;
else
act->gfn = sha2->full_page.frame;
- act->frame = gmfn_to_mfn(rd, act->gfn);
+ act->frame = (op->flags & GNTMAP_readonly) ?
+ gmfn_to_mfn(rd, act->gfn) :
+ gfn_to_mfn_private(rd, act->gfn, &p2mt);
act->start = 0;
act->length = PAGE_SIZE;
act->is_sub_page = 0;
@@ -549,7 +555,7 @@ __gnttab_map_grant_ref(
if ( rc != GNTST_okay )
goto undo_out;
}
- else if ( owner == rd )
+ else if ( owner == rd || owner == dom_cow )
{
if ( gnttab_host_mapping_get_page_type(op, ld, rd) &&
!get_page_type(pg, PGT_writable_page) )
@@ -1365,6 +1371,7 @@ gnttab_transfer(
struct gnttab_transfer gop;
unsigned long mfn;
unsigned int max_bitsize;
+ p2m_type_t p2mt;
for ( i = 0; i < count; i++ )
{
@@ -1379,7 +1386,7 @@ gnttab_transfer(
return -EFAULT;
}
- mfn = gmfn_to_mfn(d, gop.mfn);
+ mfn = gfn_to_mfn_private(d, gop.mfn, &p2mt);
/* Check the passed page frame for basic validity. */
if ( unlikely(!mfn_valid(mfn)) )
@@ -1650,6 +1657,7 @@ __acquire_grant_for_copy(
int is_sub_page;
struct domain *ignore;
s16 rc = GNTST_okay;
+ p2m_type_t p2mt;
*owning_domain = NULL;
@@ -1762,7 +1770,8 @@ __acquire_grant_for_copy(
else if ( sha1 )
{
act->gfn = sha1->frame;
- grant_frame = gmfn_to_mfn(rd, act->gfn);
+ grant_frame = readonly ? gmfn_to_mfn(rd, act->gfn) :
+ gfn_to_mfn_private(rd, act->gfn, &p2mt);
is_sub_page = 0;
trans_page_off = 0;
trans_length = PAGE_SIZE;
@@ -1771,7 +1780,8 @@ __acquire_grant_for_copy(
else if ( !(sha2->hdr.flags & GTF_sub_page) )
{
act->gfn = sha2->full_page.frame;
- grant_frame = gmfn_to_mfn(rd, act->gfn);
+ grant_frame = readonly ? gmfn_to_mfn(rd, act->gfn) :
+ gfn_to_mfn_private(rd, act->gfn, &p2mt);
is_sub_page = 0;
trans_page_off = 0;
trans_length = PAGE_SIZE;
@@ -1780,7 +1790,8 @@ __acquire_grant_for_copy(
else
{
act->gfn = sha2->sub_page.frame;
- grant_frame = gmfn_to_mfn(rd, act->gfn);
+ grant_frame = readonly ? gmfn_to_mfn(rd, act->gfn) :
+ gfn_to_mfn_private(rd, act->gfn, &p2mt);
is_sub_page = 1;
trans_page_off = sha2->sub_page.page_off;
trans_length = sha2->sub_page.length;
@@ -1917,7 +1928,7 @@ __gnttab_copy(
else
{
p2m_type_t p2mt;
- d_frame = mfn_x(gfn_to_mfn(dd, op->dest.u.gmfn, &p2mt));
+ d_frame = gfn_to_mfn_private(dd, op->dest.u.gmfn, &p2mt);
if ( p2m_is_paging(p2mt) )
{
p2m_mem_paging_populate(dd, op->dest.u.gmfn);
@@ -2351,7 +2362,7 @@ grant_table_create(
goto no_mem_4;
clear_page(t->shared_raw[i]);
}
-
+
for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
gnttab_create_shared_page(d, t, i);
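As for the XXX comment in the mm.c hunk above, one possible shape for the
consolidated check is sketched below. This is only an illustration reusing
identifiers visible in the patch; the helper name and its exact placement in
do_mmu_update() are assumptions:

/* Sketch of the helper the XXX comment asks for: fold the paging and
 * sharing checks applied before an L1 update into one place.  Returns
 * 0 to continue, -ENOENT for a paged-out page, or the unshare result
 * for an RW mapping of a shared page.  The function name is an
 * assumption; the p2m types and mem_sharing_unshare_page() call are
 * taken from the patch itself. */
static int check_p2mt_for_l1_update(struct domain *pg_owner,
                                    l1_pgentry_t l1e, p2m_type_t p2mt)
{
    if ( p2m_ram_paging_in_start == p2mt )
        return -ENOENT;

    if ( p2m_ram_shared == p2mt )
    {
        /* RO mappings of shared pages are fine; RW mappings require the
         * GFN to be unshared first, which may fail if no free page is
         * available for the private copy. */
        if ( l1e_get_flags(l1e) & _PAGE_RW )
            return mem_sharing_unshare_page(pg_owner, l1e_get_pfn(l1e), 0);
    }

    return 0;
}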