Re: [Xen-devel] [PATCH v6 08/10] xen/arm: Add relinquish_p2m_mapping to remove reference on every mapped page
On Tue, 2013-12-17 at 16:27 +0000, Julien Grall wrote:
> This function will be called when the domain relinquishes its memory.
> It removes the refcount on every page mapped to a valid MFN.
>
> Currently, Xen doesn't take a reference on every new mapping, only on
> foreign mappings. Restrict the function to foreign mappings.
>
> Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
With the proviso that this will be changed, ASAP and certainly before the
release, to properly skip non-present entries instead of allocating an
empty page just to walk it.
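
For reference, a toy standalone sketch (not the Xen walker; types and
constants simplified and assumed) of the kind of skip being asked for: a
non-present first-level entry lets the loop jump straight to the next
first-level (1GiB) boundary instead of allocating an empty table just to
visit it:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define FIRST_SHIFT 30                      /* 1GiB per first-level entry */
#define FIRST_SIZE  (1ULL << FIRST_SHIFT)

struct entry { bool valid; };               /* stand-in for lpae_t.p2m.valid */

int main(void)
{
    struct entry first[4] = { { true }, { false }, { false }, { true } };
    uint64_t addr = 0, end = 4 * FIRST_SIZE;

    while ( addr < end )
    {
        if ( !first[addr >> FIRST_SHIFT].valid )
        {
            /* Nothing can be mapped below a non-present entry: skip to
             * the next first-level boundary rather than descending. */
            addr = (addr | (FIRST_SIZE - 1)) + 1;
            continue;
        }

        printf("descend into region at 0x%llx\n", (unsigned long long)addr);
        addr += FIRST_SIZE;
    }

    return 0;
}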
>
> ---
> Changes in v6:
> - Typos
> - Rework preempt
> - Clean up if ( p2mt == ... || p2mt == ... )
> Changes in v4:
> - Use LPAE_ENTRIES instead of hardcoded value
> Changes in v3:
> - Rework title
> - Reuse create_p2m_entries to remove reference
> - Don't forget to set relmem!
> - Fix compilation (missing include)
> Changes in v2:
> - Introduce the patch
> ---
> xen/arch/arm/domain.c        |  8 +++++++
> xen/arch/arm/p2m.c           | 49 +++++++++++++++++++++++++++++++++++++++++-
> xen/include/asm-arm/domain.h |  1 +
> xen/include/asm-arm/p2m.h    | 15 +++++++++++++
> 4 files changed, 72 insertions(+), 1 deletion(-)
>
> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index 1590708..4099e88 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -717,6 +717,14 @@ int domain_relinquish_resources(struct domain *d)
> if ( ret )
> return ret;
>
> + d->arch.relmem = RELMEM_mapping;
> + /* Fallthrough */
> +
> + case RELMEM_mapping:
> + ret = relinquish_p2m_mapping(d);
> + if ( ret )
> + return ret;
> +
> d->arch.relmem = RELMEM_done;
> /* Fallthrough */
>
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 410acb6..3f4ab37 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -6,6 +6,8 @@
> #include <xen/bitops.h>
> #include <asm/flushtlb.h>
> #include <asm/gic.h>
> +#include <asm/event.h>
> +#include <asm/hardirq.h>
>
> /* First level P2M is 2 consecutive pages */
> #define P2M_FIRST_ORDER 1
> @@ -224,7 +226,8 @@ static int p2m_create_table(struct domain *d,
> enum p2m_operation {
> INSERT,
> ALLOCATE,
> - REMOVE
> + REMOVE,
> + RELINQUISH,
> };
>
> static int create_p2m_entries(struct domain *d,
> @@ -242,6 +245,7 @@ static int create_p2m_entries(struct domain *d,
> unsigned long cur_first_page = ~0,
> cur_first_offset = ~0,
> cur_second_offset = ~0;
> + unsigned long count = 0;
>
> spin_lock(&p2m->lock);
>
> @@ -326,13 +330,19 @@ static int create_p2m_entries(struct domain *d,
> maddr += PAGE_SIZE;
> }
> break;
> + case RELINQUISH:
> case REMOVE:
> {
> lpae_t pte = third[third_table_offset(addr)];
> unsigned long mfn = pte.p2m.base;
>
> if ( !pte.p2m.valid )
> + {
> + count++;
> break;
> + }
> +
> + count += 0x10;
>
> /* TODO: Handle other p2m type */
> if ( p2m_is_foreign(pte.p2m.type) )
> @@ -343,12 +353,35 @@ static int create_p2m_entries(struct domain *d,
>
> memset(&pte, 0x00, sizeof(pte));
> write_pte(&third[third_table_offset(addr)], pte);
> + count++;
> }
> break;
> }
>
> if ( flush )
> flush_tlb_all_local();
> +
> + /* Preempt every 2MiB (mapped) or 32 MiB (unmapped) - arbitrary */
> + if ( op == RELINQUISH && count >= 0x2000 )
> + {
> + if ( hypercall_preempt_check() )
> + {
> + p2m->next_gfn_to_relinquish = maddr >> PAGE_SHIFT;
> + rc = -EAGAIN;
> + goto out;
> + }
> + count = 0;
> + }
> + }
> +
> + if ( op == ALLOCATE || op == INSERT )
> + {
> + unsigned long sgfn = paddr_to_pfn(start_gpaddr);
> + unsigned long egfn = paddr_to_pfn(end_gpaddr);
> +
> + p2m->max_mapped_gfn = MAX(p2m->max_mapped_gfn, egfn);
> + /* Use next_gfn_to_relinquish to store the lowest gfn mapped */
> + p2m->next_gfn_to_relinquish = MIN(p2m->next_gfn_to_relinquish, sgfn);
> }
>
> rc = 0;
> @@ -534,12 +567,26 @@ int p2m_init(struct domain *d)
>
> p2m->first_level = NULL;
>
> + p2m->max_mapped_gfn = 0;
> + p2m->next_gfn_to_relinquish = ULONG_MAX;
> +
> err:
> spin_unlock(&p2m->lock);
>
> return rc;
> }
>
> +int relinquish_p2m_mapping(struct domain *d)
> +{
> + struct p2m_domain *p2m = &d->arch.p2m;
> +
> + return create_p2m_entries(d, RELINQUISH,
> + pfn_to_paddr(p2m->next_gfn_to_relinquish),
> + pfn_to_paddr(p2m->max_mapped_gfn),
> + pfn_to_paddr(INVALID_MFN),
> + MATTR_MEM, p2m_invalid);
> +}
> +
> unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
> {
> paddr_t p = p2m_lookup(d, pfn_to_paddr(gpfn), NULL);
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 53c9895..28d39a0 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -112,6 +112,7 @@ struct arch_domain
> RELMEM_not_started,
> RELMEM_xen,
> RELMEM_page,
> + RELMEM_mapping,
> RELMEM_done,
> } relmem;
>
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index 5ccfa7f..8b7b6d0 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -18,6 +18,15 @@ struct p2m_domain {
>
> /* Current VMID in use */
> uint8_t vmid;
> +
> + /* Highest guest frame that's ever been mapped in the p2m
> + * Only takes into account ram and foreign mapping
> + */
> + unsigned long max_mapped_gfn;
> +
> + /* When releasing mapped gfn's in a preemptible manner, recall where
> + * to resume the search */
> + unsigned long next_gfn_to_relinquish;
> };
>
> /* List of possible type for each page in the p2m entry.
> @@ -48,6 +57,12 @@ int p2m_init(struct domain *d);
> /* Return all the p2m resources to Xen. */
> void p2m_teardown(struct domain *d);
>
> +/* Remove mapping refcount on each mapping page in the p2m
> + *
> + * TODO: For the moment only foreign mappings are handled
> + */
> +int relinquish_p2m_mapping(struct domain *d);
> +
> /* Allocate a new p2m table for a domain.
> *
> * Returns 0 for success or -errno.
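
(For anyone reading the preemption check above: a present third-level entry
adds 0x10 to count and an absent one adds 1, with a preemption check once
count reaches 0x2000. 0x2000 / 0x10 = 512 present 4KiB pages = 2MiB, while
0x2000 absent entries * 4KiB = 32MiB, which is where the "2MiB (mapped) or
32 MiB (unmapped)" figures in the comment come from.)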
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel