Re: [Xen-devel] [RFC 02/22] xen/arm: p2m: Store in p2m_domain whether we need to clean the entry
On Thu, 28 Jul 2016, Julien Grall wrote:
> Each entry in the page table has to table clean when the IOMMU does not
What does it mean to "table clean"?
> support coherent walk. Rather than querying every time the page table is
> updated, it is possible to do it only once when the p2m is initialized.
>
> This is because this value can never change, Xen would be in big trouble
> otherwise.
>
> With this change, the initialize of the IOMMU for a given domain has to
"the initialization"
> be done earlier in order to know whether the page table entries need to
> be clean. It is fine to move the call earlier because it has no
"be cleaned"
Aside from the small imperfections in the commit message:
Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> dependency.
>
> Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
> ---
> xen/arch/arm/domain.c | 8 +++++---
> xen/arch/arm/p2m.c | 47 ++++++++++++++++++++++-----------------------
> xen/include/asm-arm/p2m.h | 3 +++
> 3 files changed, 30 insertions(+), 28 deletions(-)
>
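The gist of the change, for readers skimming the diffs below: the coherent-walk query moves out of the per-update path and into p2m_init(), where its result is cached in a new clean_pte field. A minimal before/after sketch, using only the names that appear in the patch:

    /* Before: recomputed on every call into apply_p2m_changes(). */
    flush_pt = iommu_enabled && !iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK);

    /* After: computed once per domain in p2m_init() and reused from then on. */
    p2m->clean_pte = iommu_enabled &&
                     !iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK);
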
> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index 20bb2ba..48f04c8 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -555,6 +555,11 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
> return 0;
>
> ASSERT(config != NULL);
> +
> + /* p2m_init relies on some value initialized by the IOMMU subsystem */
> + if ( (rc = iommu_domain_init(d)) != 0 )
> + goto fail;
> +
> if ( (rc = p2m_init(d)) != 0 )
> goto fail;
>
> @@ -637,9 +642,6 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
> if ( is_hardware_domain(d) && (rc = domain_vuart_init(d)) )
> goto fail;
>
> - if ( (rc = iommu_domain_init(d)) != 0 )
> - goto fail;
> -
> return 0;
>
> fail:
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 40a0b80..d389f2b 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -416,7 +416,7 @@ static inline void p2m_remove_pte(lpae_t *p, bool_t flush_cache)
> * level_shift is the number of bits at the level we want to create.
> */
> static int p2m_create_table(struct p2m_domain *p2m, lpae_t *entry,
> - int level_shift, bool_t flush_cache)
> + int level_shift)
> {
> struct page_info *page;
> lpae_t *p;
> @@ -462,7 +462,7 @@ static int p2m_create_table(struct p2m_domain *p2m, lpae_t *entry,
> else
> clear_page(p);
>
> - if ( flush_cache )
> + if ( p2m->clean_pte )
> clean_dcache_va_range(p, PAGE_SIZE);
>
> unmap_domain_page(p);
> @@ -470,7 +470,7 @@ static int p2m_create_table(struct p2m_domain *p2m, lpae_t *entry,
> pte = mfn_to_p2m_entry(_mfn(page_to_mfn(page)), p2m_invalid,
> p2m->default_access);
>
> - p2m_write_pte(entry, pte, flush_cache);
> + p2m_write_pte(entry, pte, p2m->clean_pte);
>
> return 0;
> }
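For context on the flag being replaced in these hunks: "cleaning" an entry means writing it and then cleaning the covering data cache lines, so that an IOMMU whose page-table walker does not snoop the CPU caches reads the updated entry from memory rather than a stale copy. A sketch of the helper's likely shape, assuming it matches the p2m_write_pte() of xen/arch/arm/p2m.c at the time (the p2m_remove_pte() signature visible above suggests the same flush_cache parameter):

    static inline void p2m_write_pte(lpae_t *p, lpae_t pte, bool_t flush_cache)
    {
        write_pte(p, pte);      /* Write the entry, with the required barriers. */
        if ( flush_cache )
            clean_dcache(*p);   /* Push the line to memory for non-coherent walkers. */
    }
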
> @@ -653,12 +653,10 @@ static const paddr_t level_shifts[] =
>
> static int p2m_shatter_page(struct p2m_domain *p2m,
> lpae_t *entry,
> - unsigned int level,
> - bool_t flush_cache)
> + unsigned int level)
> {
> const paddr_t level_shift = level_shifts[level];
> - int rc = p2m_create_table(p2m, entry,
> - level_shift - PAGE_SHIFT, flush_cache);
> + int rc = p2m_create_table(p2m, entry, level_shift - PAGE_SHIFT);
>
> if ( !rc )
> {
> @@ -680,7 +678,6 @@ static int p2m_shatter_page(struct p2m_domain *p2m,
> static int apply_one_level(struct domain *d,
> lpae_t *entry,
> unsigned int level,
> - bool_t flush_cache,
> enum p2m_operation op,
> paddr_t start_gpaddr,
> paddr_t end_gpaddr,
> @@ -719,7 +716,7 @@ static int apply_one_level(struct domain *d,
> if ( level < 3 )
> pte.p2m.table = 0; /* Superpage entry */
>
> - p2m_write_pte(entry, pte, flush_cache);
> + p2m_write_pte(entry, pte, p2m->clean_pte);
>
> *flush |= p2m_valid(orig_pte);
>
> @@ -754,7 +751,7 @@ static int apply_one_level(struct domain *d,
> /* Not present -> create table entry and descend */
> if ( !p2m_valid(orig_pte) )
> {
> - rc = p2m_create_table(p2m, entry, 0, flush_cache);
> + rc = p2m_create_table(p2m, entry, 0);
> if ( rc < 0 )
> return rc;
> return P2M_ONE_DESCEND;
> @@ -764,7 +761,7 @@ static int apply_one_level(struct domain *d,
> if ( p2m_mapping(orig_pte) )
> {
> *flush = true;
> - rc = p2m_shatter_page(p2m, entry, level, flush_cache);
> + rc = p2m_shatter_page(p2m, entry, level);
> if ( rc < 0 )
> return rc;
> } /* else: an existing table mapping -> descend */
> @@ -801,7 +798,7 @@ static int apply_one_level(struct domain *d,
> * and descend.
> */
> *flush = true;
> - rc = p2m_shatter_page(p2m, entry, level, flush_cache);
> + rc = p2m_shatter_page(p2m, entry, level);
> if ( rc < 0 )
> return rc;
>
> @@ -827,7 +824,7 @@ static int apply_one_level(struct domain *d,
>
> *flush = true;
>
> - p2m_remove_pte(entry, flush_cache);
> + p2m_remove_pte(entry, p2m->clean_pte);
> p2m_mem_access_radix_set(p2m, paddr_to_pfn(*addr), p2m_access_rwx);
>
> *addr += level_size;
> @@ -886,7 +883,7 @@ static int apply_one_level(struct domain *d,
> /* Shatter large pages as we descend */
> if ( p2m_mapping(orig_pte) )
> {
> - rc = p2m_shatter_page(p2m, entry, level, flush_cache);
> + rc = p2m_shatter_page(p2m, entry, level);
> if ( rc < 0 )
> return rc;
> } /* else: an existing table mapping -> descend */
> @@ -904,7 +901,7 @@ static int apply_one_level(struct domain *d,
> return rc;
>
> p2m_set_permission(&pte, pte.p2m.type, a);
> - p2m_write_pte(entry, pte, flush_cache);
> + p2m_write_pte(entry, pte, p2m->clean_pte);
> }
>
> *addr += level_size;
> @@ -954,17 +951,9 @@ static int apply_p2m_changes(struct domain *d,
> const unsigned int preempt_count_limit = (op == MEMACCESS) ? 1 : 0x2000;
> const bool_t preempt = !is_idle_vcpu(current);
> bool_t flush = false;
> - bool_t flush_pt;
> PAGE_LIST_HEAD(free_pages);
> struct page_info *pg;
>
> - /*
> - * Some IOMMU don't support coherent PT walk. When the p2m is
> - * shared with the CPU, Xen has to make sure that the PT changes have
> - * reached the memory
> - */
> - flush_pt = iommu_enabled && !iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK);
> -
> p2m_write_lock(p2m);
>
> /* Static mapping. P2M_ROOT_PAGES > 1 are handled below */
> @@ -1070,7 +1059,7 @@ static int apply_p2m_changes(struct domain *d,
> lpae_t old_entry = *entry;
>
> ret = apply_one_level(d, entry,
> - level, flush_pt, op,
> + level, op,
> start_gpaddr, end_gpaddr,
> &addr, &maddr, &flush,
> t, a);
> @@ -1127,7 +1116,7 @@ static int apply_p2m_changes(struct domain *d,
>
> page_list_del(pg, &p2m->pages);
>
> - p2m_remove_pte(entry, flush_pt);
> + p2m_remove_pte(entry, p2m->clean_pte);
>
> p2m->stats.mappings[level - 1]--;
> update_reference_mapping(pages[level - 1], old_entry,
> *entry);
> @@ -1399,6 +1388,14 @@ int p2m_init(struct domain *d)
> p2m->mem_access_enabled = false;
> radix_tree_init(&p2m->mem_access_settings);
>
> + /*
> + * Some IOMMUs don't support coherent PT walk. When the p2m is
> + * shared with the CPU, Xen has to make sure that the PT changes have
> + * reached the memory
> + */
> + p2m->clean_pte = iommu_enabled &&
> + !iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK);
> +
> rc = p2m_alloc_table(d);
>
> return rc;
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index 53c4d78..03bfd5e 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -48,6 +48,9 @@ struct p2m_domain {
> * decrease. */
> gfn_t lowest_mapped_gfn;
>
> + /* Indicate if it is required to clean the cache when writing an entry */
> + bool_t clean_pte;
> +
> /* Gather some statistics for information purposes only */
> struct {
> /* Number of mappings at each p2m tree level */
> --
> 1.9.1
>
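The one behavioural change worth calling out is the reordering in domain.c: p2m_init() now reads IOMMU state, so iommu_domain_init() has to run first. Roughly, the resulting sequence in arch_domain_create():

    /* IOMMU first: p2m_init() queries IOMMU_FEAT_COHERENT_WALK below. */
    if ( (rc = iommu_domain_init(d)) != 0 )
        goto fail;

    /* Safe now: caches p2m->clean_pte from the per-domain IOMMU features. */
    if ( (rc = p2m_init(d)) != 0 )
        goto fail;
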
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel