Re: [Xen-devel] [PATCH v2] x86/mm: Add mem access rights to NPT
On Mon, Jun 18, 2018 at 9:19 AM Alexandru Isaila
<aisaila@xxxxxxxxxxxxxxx> wrote:
>
> From: Isaila Alexandru <aisaila@xxxxxxxxxxxxxxx>
>
> This patch adds access rights for the NPT pages. The access rights are
> saved in a radix tree whose root is kept in p2m_domain, and they are
> manipulated through the p2m_set_access() and p2m_get_access() functions.
> The patch follows the EPT logic.
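
For anyone skimming: the mapping described above is gfn -> p2m_access_t,
with the enum value packed into the tree's void pointers via the
radix_tree_int_to_ptr()/radix_tree_ptr_to_int() helpers. A minimal sketch
of the pattern, using the names the patch introduces below:

    /* Record a non-default access setting for a gfn. */
    radix_tree_insert(&p2m->mem_access_settings, gfn,
                      radix_tree_int_to_ptr(p2m_access_r));

    /* Read it back; a missing entry means the default, p2m_access_rwx. */
    void *ptr = radix_tree_lookup(&p2m->mem_access_settings, gfn);
    p2m_access_t a = ptr ? radix_tree_ptr_to_int(ptr) : p2m_access_rwx;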
>
> Note: It was tested with xen-access write
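
(Presumably via tools/tests/xen-access, i.e. something along the lines of
"xen-access <domid> write", registering a vm_event listener and reporting
write violations?)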
>
> Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
>
> ---
> Changes since V1:
> - Save the page access rights in a radix tree
> ---
> xen/arch/x86/mm/mem_access.c | 3 ++
> xen/arch/x86/mm/p2m-pt.c | 100 ++++++++++++++++++++++++++++++++++-----
> xen/arch/x86/mm/p2m.c | 3 ++
> xen/include/asm-x86/mem_access.h | 2 +-
> xen/include/asm-x86/p2m.h | 6 +++
> 5 files changed, 100 insertions(+), 14 deletions(-)
>
> diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
> index c0cd017..b240f13 100644
> --- a/xen/arch/x86/mm/mem_access.c
> +++ b/xen/arch/x86/mm/mem_access.c
> @@ -221,6 +221,9 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
> {
> req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
> req->u.mem_access.gla = gla;
> + }
> + if ( npfec.gla_valid || cpu_has_svm )
> + {
>
> if ( npfec.kind == npfec_kind_with_gla )
> req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
> diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
> index b8c5d2e..1a16533 100644
> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -68,7 +68,8 @@
> static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
> p2m_type_t t,
> mfn_t mfn,
> - unsigned int level)
> + unsigned int level,
> + p2m_access_t access)
> {
> unsigned long flags;
> /*
> @@ -87,23 +88,27 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
> case p2m_ram_paged:
> case p2m_ram_paging_in:
> default:
> - return flags | _PAGE_NX_BIT;
> + flags |= _PAGE_NX_BIT;
> + break;
> case p2m_grant_map_ro:
> return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
> case p2m_ioreq_server:
> flags |= P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
> if ( p2m->ioreq.flags & XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
> - return flags & ~_PAGE_RW;
> - return flags;
> + flags &= ~_PAGE_RW;
> + break;
> case p2m_ram_ro:
> case p2m_ram_logdirty:
> case p2m_ram_shared:
> - return flags | P2M_BASE_FLAGS;
> + flags |= P2M_BASE_FLAGS;
> + break;
> case p2m_ram_rw:
> - return flags | P2M_BASE_FLAGS | _PAGE_RW;
> + flags |= P2M_BASE_FLAGS | _PAGE_RW;
> + break;
> case p2m_grant_map_rw:
> case p2m_map_foreign:
> - return flags | P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
> + flags |= P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
> + break;
> case p2m_mmio_direct:
> if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
> flags |= _PAGE_RW;
> @@ -112,8 +117,37 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
> flags |= _PAGE_PWT;
> ASSERT(!level);
> }
> - return flags | P2M_BASE_FLAGS | _PAGE_PCD;
> + flags |= P2M_BASE_FLAGS | _PAGE_PCD;
> + break;
> + }
> + switch (access)
> + {
> + case p2m_access_r:
> + case p2m_access_w:
> + flags |= _PAGE_NX_BIT;
> + flags &= ~_PAGE_RW;
> + break;
> + case p2m_access_rw:
> + flags |= _PAGE_NX_BIT;
> + break;
> + case p2m_access_n:
> + case p2m_access_n2rwx:
> + flags |= _PAGE_NX_BIT;
> + flags &= ~_PAGE_RW;
> + break;
> + case p2m_access_rx:
> + case p2m_access_wx:
> + case p2m_access_rx2rw:
> + flags &= ~(_PAGE_NX_BIT | _PAGE_RW);
> + break;
> + case p2m_access_x:
> + flags &= ~_PAGE_RW;
> + break;
> + case p2m_access_rwx:
> + default:
> + break;
> }
> + return flags;
> }
>
>
> @@ -174,6 +208,35 @@ static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry,
> l1e_add_flags(*p2m_entry, iommu_nlevel_to_flags(nlevel, flags));
> }
>
> +static p2m_access_t p2m_get_access(struct p2m_domain *p2m, unsigned long gfn)
> +{
> + void *ptr;
> +
> + ptr = radix_tree_lookup(&p2m->mem_access_settings, gfn);
> + if ( !ptr )
> + return p2m_access_rwx;
> + else
> + return radix_tree_ptr_to_int(ptr);
> +}
> +
> +static void p2m_set_access(struct p2m_domain *p2m, unsigned long gfn,
> + p2m_access_t a)
> +{
> + int rc;
> +
Shouldn't there be some locking around the radix tree operations here?
If not, why not?
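
To illustrate: if the callers can't be assumed to already hold the p2m
lock, each tree update would presumably need to be wrapped, e.g.

    p2m_lock(p2m);
    p2m_set_access(p2m, gfn, p2m->default_access);
    p2m_unlock(p2m);

(a hypothetical placement, not a suggested fix). If the p2m lock is in
fact always held on these paths, a comment here saying so would answer
the question.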
> + if ( p2m_access_rwx == a )
> + {
> + /* rwx is the default; an absent entry already means rwx. */
> + radix_tree_delete(&p2m->mem_access_settings, gfn);
> + return;
> + }
> +
> + rc = radix_tree_insert(&p2m->mem_access_settings, gfn,
> + radix_tree_int_to_ptr(a));
> + if ( rc == -EEXIST )
> + /* If a setting already exists, change it to the new one */
> + radix_tree_replace_slot(
> + radix_tree_lookup_slot(
> + &p2m->mem_access_settings, gfn),
> + radix_tree_int_to_ptr(a));
> +}
> +
> /* Returns: 0 for success, -errno for failure */
> static int
> p2m_next_level(struct p2m_domain *p2m, void **table,
> @@ -201,6 +264,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
> new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
>
> p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
> + p2m_set_access(p2m, gfn, p2m->default_access);
> p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
> }
> else if ( flags & _PAGE_PSE )
> @@ -249,6 +313,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
> {
> new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
> flags);
> + p2m_set_access(p2m, gfn, p2m->default_access);
> p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level);
> }
>
> @@ -256,6 +321,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
>
> new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
> p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
> + p2m_set_access(p2m, gfn, p2m->default_access);
> p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
> }
> else
> @@ -420,8 +486,9 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
> if ( nt != ot )
> {
> unsigned long mfn = l1e_get_pfn(e);
> + p2m_access_t a = p2m_get_access(p2m, gfn);
> unsigned long flags = p2m_type_to_flags(p2m, nt,
> - _mfn(mfn), level);
> + _mfn(mfn), level, a);
>
> if ( level )
> {
> @@ -569,13 +636,14 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
> ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
> l3e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
> ? p2m_l3e_from_pfn(mfn_x(mfn),
> - p2m_type_to_flags(p2m, p2mt, mfn, 2))
> + p2m_type_to_flags(p2m, p2mt, mfn, 2, p2ma))
> : l3e_empty();
> entry_content.l1 = l3e_content.l3;
>
> if ( entry_content.l1 != 0 )
> p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
>
> + p2m_set_access(p2m, gfn, p2ma);
> p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
> /* NB: paging_write_p2m_entry() handles tlb flushes properly */
> }
> @@ -608,7 +676,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
>
> if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
> entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
> - p2m_type_to_flags(p2m, p2mt, mfn, 0));
> + p2m_type_to_flags(p2m, p2mt, mfn, 0, p2ma));
> else
> entry_content = l1e_empty();
>
> @@ -630,6 +698,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
> p2m->ioreq.entry_count--;
> }
>
> + p2m_set_access(p2m, gfn, p2ma);
> /* level 1 entry */
> p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
> /* NB: paging_write_p2m_entry() handles tlb flushes properly */
> @@ -661,13 +730,14 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
> ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
> l2e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
> ? p2m_l2e_from_pfn(mfn_x(mfn),
> - p2m_type_to_flags(p2m, p2mt, mfn, 1))
> + p2m_type_to_flags(p2m, p2mt, mfn, 1, p2ma))
> : l2e_empty();
> entry_content.l1 = l2e_content.l2;
>
> if ( entry_content.l1 != 0 )
> p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
>
> + p2m_set_access(p2m, gfn, p2ma);
> p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
> /* NB: paging_write_p2m_entry() handles tlb flushes properly */
> }
> @@ -750,7 +820,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_,
> * XXX we will return p2m_invalid for unmapped gfns */
> *t = p2m_mmio_dm;
> /* Not implemented except with EPT */
> - *a = p2m_access_rwx;
> + *a = p2m_access_n;
>
> if ( gfn > p2m->max_mapped_pfn )
> {
> @@ -813,6 +883,7 @@ pod_retry_l3:
> l1_table_offset(addr));
> *t = p2m_recalc_type(recalc || _needs_recalc(flags),
> p2m_flags_to_type(flags), p2m, gfn);
> + *a = p2m_get_access(p2m, gfn);
> unmap_domain_page(l3e);
>
> ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
> @@ -852,6 +923,7 @@ pod_retry_l2:
> mfn = _mfn(l2e_get_pfn(*l2e) + l1_table_offset(addr));
> *t = p2m_recalc_type(recalc || _needs_recalc(flags),
> p2m_flags_to_type(flags), p2m, gfn);
> + *a = p2m_get_access(p2m, gfn);
> unmap_domain_page(l2e);
>
> ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
> @@ -888,6 +960,7 @@ pod_retry_l1:
> }
> mfn = l1e_get_mfn(*l1e);
> *t = p2m_recalc_type(recalc || _needs_recalc(flags), l1t, p2m, gfn);
> + *a = p2m_get_access(p2m, gfn);
> unmap_domain_page(l1e);
>
> ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t) || p2m_is_paging(*t));
> @@ -1127,6 +1200,7 @@ void p2m_pt_init(struct p2m_domain *p2m)
> p2m->change_entry_type_global = p2m_pt_change_entry_type_global;
> p2m->change_entry_type_range = p2m_pt_change_entry_type_range;
> p2m->write_p2m_entry = paging_write_p2m_entry;
> + radix_tree_init(&p2m->mem_access_settings);
> #if P2M_AUDIT
> p2m->audit_p2m = p2m_pt_audit_p2m;
> #else
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index c53cab4..e360fdc 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -675,6 +675,9 @@ void p2m_teardown(struct p2m_domain *p2m)
>
> d = p2m->domain;
>
> + if ( cpu_has_svm )
> + radix_tree_destroy(&p2m->mem_access_settings, NULL);
> +
> p2m_lock(p2m);
> ASSERT(atomic_read(&d->shr_pages) == 0);
> p2m->phys_table = pagetable_null();
> diff --git a/xen/include/asm-x86/mem_access.h b/xen/include/asm-x86/mem_access.h
> index 4043c9f..34f2c07 100644
> --- a/xen/include/asm-x86/mem_access.h
> +++ b/xen/include/asm-x86/mem_access.h
> @@ -46,7 +46,7 @@ bool p2m_mem_access_emulate_check(struct vcpu *v,
> /* Sanity check for mem_access hardware support */
> static inline bool p2m_mem_access_sanity_check(struct domain *d)
> {
> - return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d);
> + return is_hvm_domain(d) && hap_enabled(d);
> }
>
> #endif /*__ASM_X86_MEM_ACCESS_H__ */
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index d4b3cfc..f5868df 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -288,6 +288,12 @@ struct p2m_domain {
> * retyped get this access type. See definition of p2m_access_t. */
> p2m_access_t default_access;
>
> + /*
> + * Radix tree to store the p2m_access_t settings, as the PTEs don't have
> + * enough available bits to store this information.
> + */
> + struct radix_tree_root mem_access_settings;
> +
> /* If true, and an access fault comes in and there is no vm_event listener,
> * pause domain. Otherwise, remove access restrictions. */
> bool_t access_required;
> --
> 2.7.4