[Xen-devel] [PATCH 3/7] x86/mm: Further restrict permissions on some virtual mappings
As originally reported, the Linear Pagetable slot maps 512GB of RAM as RWX,
where the guest has full read access and a lot of direct or indirect control
over the written content. It isn't hard for a PV guest to hide shellcode
here.
Therefore, increase defence in depth by auditing our current pagetable
mappings.
* The regular linear, shadow linear, and per-domain slots have no business
being executable (but do need to be writeable), so are updated to be NX
(see the sketch of the attribute sets below).
* The Read Only mappings of the M2P (compat and regular) don't need to be
writeable or executable.
* The PV GDT mappings don't need to be executable.
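
For reference, the attribute sets involved differ only in the _PAGE_RW and
_PAGE_NX bits.  A simplified sketch of how the constants relate (an
illustrative assumption, not the literal header contents; the authoritative
definitions live in xen/include/asm-x86/page.h and may differ in detail):

  /* Illustrative sketch only -- see asm-x86/page.h for the real definitions. */
  #define __PAGE_HYPERVISOR_RO  (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NX)
  #define __PAGE_HYPERVISOR_RW  (__PAGE_HYPERVISOR_RO | _PAGE_DIRTY | _PAGE_RW)
  #define __PAGE_HYPERVISOR_RX  (_PAGE_PRESENT | _PAGE_ACCESSED)
  #define __PAGE_HYPERVISOR     (__PAGE_HYPERVISOR_RX | _PAGE_DIRTY | _PAGE_RW) /* i.e. RWX */

i.e. moving a mapping from __PAGE_HYPERVISOR to __PAGE_HYPERVISOR_RW sets NX
on it, while __PAGE_HYPERVISOR_RO additionally clears _PAGE_RW.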
Reported-by: Jann Horn <jannh@xxxxxxxxxx>
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/arch/x86/domain.c          |  2 +-
 xen/arch/x86/mm.c              | 10 +++++-----
 xen/arch/x86/mm/hap/hap.c      |  4 ++--
 xen/arch/x86/mm/shadow/multi.c | 18 +++++++++---------
 xen/arch/x86/x86_64/mm.c       | 12 ++++++------
 5 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 90e2b1f..ef8c05a 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2040,7 +2040,7 @@ static void __context_switch(void)
 
         for ( i = 0; i < NR_RESERVED_GDT_PAGES; i++ )
             l1e_write(pl1e + FIRST_RESERVED_GDT_PAGE + i,
-                      l1e_from_pfn(mfn + i, __PAGE_HYPERVISOR));
+                      l1e_from_pfn(mfn + i, __PAGE_HYPERVISOR_RW));
     }
 
     if ( need_full_gdt(pd) &&
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e639ce2..77b0af1 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -384,7 +384,7 @@ void __init arch_init_memory(void)
                     for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
                         l3tab[i] = l3e_empty();
                     split_l4e = l4e_from_pfn(virt_to_mfn(l3tab),
-                                             __PAGE_HYPERVISOR);
+                                             __PAGE_HYPERVISOR_RW);
                 }
                 else
                     ++root_pgt_pv_xen_slots;
@@ -1588,9 +1588,9 @@ void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
             split_l4e;
 #endif
     l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR);
+        l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
     l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR);
+        l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
     if ( zap_ro_mpt || is_pv_32bit_domain(d) || paging_mode_refcounts(d) )
         l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
 }
@@ -6380,7 +6380,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
         }
         l2tab = __map_domain_page(pg);
         clear_page(l2tab);
-        l3tab[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR);
+        l3tab[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR_RW);
     }
     else
         l2tab = map_domain_page(_mfn(l3e_get_pfn(l3tab[l3_table_offset(va)])));
@@ -6422,7 +6422,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
                 l1tab = __map_domain_page(pg);
             }
             clear_page(l1tab);
-            *pl2e = l2e_from_page(pg, __PAGE_HYPERVISOR);
+            *pl2e = l2e_from_page(pg, __PAGE_HYPERVISOR_RW);
         }
         else if ( !l1tab )
             l1tab = map_domain_page(_mfn(l2e_get_pfn(*pl2e)));
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index b981432..8476269 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -409,11 +409,11 @@ static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
     /* Install the per-domain mappings for this domain */
     l4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_pfn(mfn_x(page_to_mfn(d->arch.perdomain_l3_pg)),
-                     __PAGE_HYPERVISOR);
+                     __PAGE_HYPERVISOR_RW);
 
     /* Install a linear mapping */
     l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        l4e_from_pfn(mfn_x(l4mfn), __PAGE_HYPERVISOR);
+        l4e_from_pfn(mfn_x(l4mfn), __PAGE_HYPERVISOR_RW);
 
     unmap_domain_page(l4e);
 }
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 2fb0125..f65ffc6 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1475,7 +1475,7 @@ void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
     /* Install the per-domain mappings for this domain */
     sl4e[shadow_l4_table_offset(PERDOMAIN_VIRT_START)] =
         shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
-                            __PAGE_HYPERVISOR);
+                            __PAGE_HYPERVISOR_RW);
 
     if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) &&
          !VM_ASSIST(d, m2p_strict) )
@@ -1489,7 +1489,7 @@ void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
      * monitor pagetable structure, which is built in make_monitor_table
      * and maintained by sh_update_linear_entries. */
     sl4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
-        shadow_l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR);
+        shadow_l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR_RW);
 
     /* Self linear mapping. */
     if ( shadow_mode_translate(d) && !shadow_mode_external(d) )
@@ -1501,7 +1501,7 @@ void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
     else
     {
         sl4e[shadow_l4_table_offset(LINEAR_PT_VIRT_START)] =
-            shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR);
+            shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR_RW);
     }
 
     unmap_domain_page(sl4e);
@@ -1654,12 +1654,12 @@ sh_make_monitor_table(struct vcpu *v)
             m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
             mfn_to_page(m3mfn)->shadow_flags = 3;
             l4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)]
-                = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
+                = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR_RW);
 
             m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
             mfn_to_page(m2mfn)->shadow_flags = 2;
             l3e = map_domain_page(m3mfn);
-            l3e[0] = l3e_from_pfn(mfn_x(m2mfn), __PAGE_HYPERVISOR);
+            l3e[0] = l3e_from_pfn(mfn_x(m2mfn), __PAGE_HYPERVISOR_RW);
             unmap_domain_page(l3e);
 
             if ( is_pv_32bit_domain(d) )
@@ -1668,7 +1668,7 @@ sh_make_monitor_table(struct vcpu *v)
                  * area into its usual VAs in the monitor tables */
                 m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
                 mfn_to_page(m3mfn)->shadow_flags = 3;
-                l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
+                l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR_RW);
 
                 m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
                 mfn_to_page(m2mfn)->shadow_flags = 2;
@@ -3838,7 +3838,7 @@ sh_update_linear_entries(struct vcpu *v)
         {
             __linear_l4_table[l4_linear_offset(SH_LINEAR_PT_VIRT_START)] =
                 l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
-                             __PAGE_HYPERVISOR);
+                             __PAGE_HYPERVISOR_RW);
         }
         else
         {
@@ -3846,7 +3846,7 @@ sh_update_linear_entries(struct vcpu *v)
             ml4e = map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
             ml4e[l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
                 l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
-                             __PAGE_HYPERVISOR);
+                             __PAGE_HYPERVISOR_RW);
             unmap_domain_page(ml4e);
         }
     }
@@ -3902,7 +3902,7 @@ sh_update_linear_entries(struct vcpu *v)
                 ml2e[i] =
                     (shadow_l3e_get_flags(sl3e[i]) & _PAGE_PRESENT)
                     ? l2e_from_pfn(mfn_x(shadow_l3e_get_mfn(sl3e[i])),
-                                   __PAGE_HYPERVISOR)
+                                   __PAGE_HYPERVISOR_RW)
                     : l2e_empty();
             }
 
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 34f3250..ac358a8 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -470,7 +470,7 @@ static int setup_m2p_table(struct mem_hotadd_info *info)
             clear_page(l2_ro_mpt);
             l3e_write(&l3_ro_mpt[l3_table_offset(va)],
                       l3e_from_paddr(__pa(l2_ro_mpt),
-                                     __PAGE_HYPERVISOR | _PAGE_USER));
+                                     __PAGE_HYPERVISOR_RO | _PAGE_USER));
             l2_ro_mpt += l2_table_offset(va);
         }
 
@@ -515,7 +515,7 @@ void __init paging_init(void)
             l3_ro_mpt = page_to_virt(l3_pg);
             clear_page(l3_ro_mpt);
             l4e_write(&idle_pg_table[l4_table_offset(va)],
-                      l4e_from_page(l3_pg, __PAGE_HYPERVISOR));
+                      l4e_from_page(l3_pg, __PAGE_HYPERVISOR_RW));
         }
     }
@@ -525,7 +525,7 @@ void __init paging_init(void)
     l3_ro_mpt = page_to_virt(l3_pg);
     clear_page(l3_ro_mpt);
     l4e_write(&idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)],
-              l4e_from_page(l3_pg, __PAGE_HYPERVISOR | _PAGE_USER));
+              l4e_from_page(l3_pg, __PAGE_HYPERVISOR_RO | _PAGE_USER));
 
     /*
      * Allocate and map the machine-to-phys table.
@@ -612,7 +612,7 @@ void __init paging_init(void)
             l2_ro_mpt = page_to_virt(l2_pg);
             clear_page(l2_ro_mpt);
             l3e_write(&l3_ro_mpt[l3_table_offset(va)],
-                      l3e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER));
+                      l3e_from_page(l2_pg, __PAGE_HYPERVISOR_RO | _PAGE_USER));
             ASSERT(!l2_table_offset(va));
         }
         /* NB. Cannot be GLOBAL: guest user mode should not see it. */
@@ -634,7 +634,7 @@ void __init paging_init(void)
     compat_idle_pg_table_l2 = l2_ro_mpt;
     clear_page(l2_ro_mpt);
     l3e_write(&l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
-              l3e_from_paddr(__pa(l2_ro_mpt), __PAGE_HYPERVISOR));
+              l3e_from_paddr(__pa(l2_ro_mpt), __PAGE_HYPERVISOR_RO));
     l2_ro_mpt += l2_table_offset(HIRO_COMPAT_MPT_VIRT_START);
     /* Allocate and map the compatibility mode machine-to-phys table. */
     mpt_size = (mpt_size >> 1) + (1UL << (L2_PAGETABLE_SHIFT - 1));
@@ -679,7 +679,7 @@ void __init paging_init(void)
 
     /* Set up linear page table mapping. */
     l4e_write(&idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)],
-              l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR));
+              l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR_RW));
     return;
 
  nomem:
--
2.1.4