[Xen-devel] [PATCH 3/4] x86/mm: Use a more descriptive name for pagetable mfns
In many places, a PTE being modified is accompanied by the pagetable mfn
which contains the PTE (primarily in order to be able to maintain linear
mapping counts).  In many cases, this mfn is stored in the nondescript
variable (or argument) "pfn".

Replace these names with lNmfn, to indicate that 1) this is a pagetable
mfn, and 2) it is at the same level as the PTE in question.  This should
be enough to remind readers that it is the mfn containing the PTE.

No functional change.

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
---
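As a rough illustration of the naming convention (a standalone sketch, not
Xen code: the *_ill types, the 12-bit shift, and example_get_page_from_l2e()
below are invented for the example; the real helpers are the ones touched in
xen/arch/x86/mm.c by this patch):

/* Build stand-alone, e.g.: gcc -Wall -o sketch sketch.c */
#include <stdio.h>

typedef unsigned long mfn_t_ill;                 /* stand-in for an MFN      */
typedef struct { unsigned long pte; } l2e_t_ill; /* stand-in for an L2 entry */

/*
 * An L2 entry is validated together with the mfn of the L2 pagetable that
 * contains it.  Calling that argument "l2mfn" rather than "pfn" records
 * both facts at the call site: it is a pagetable mfn, and it is at the
 * same level as the entry being validated.  Knowing the containing
 * table's mfn is what makes a linear (self-referencing) entry detectable.
 */
static int example_get_page_from_l2e(l2e_t_ill l2e, mfn_t_ill l2mfn)
{
    mfn_t_ill target = l2e.pte >> 12;    /* mfn the entry points at */

    if ( target == l2mfn )
        return 1;    /* linear mapping back onto the containing table */

    return 0;        /* ordinary mapping of some other page */
}

int main(void)
{
    /* An entry pointing back at the table (mfn 0x1234) that holds it. */
    l2e_t_ill linear = { .pte = 0x1234UL << 12 };

    printf("linear? %d\n", example_get_page_from_l2e(linear, 0x1234UL));
    return 0;
}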
 xen/arch/x86/mm.c | 50 +++++++++++++++++++++++------------------------
 1 file changed, 25 insertions(+), 25 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c05039ab21..54b4100d55 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1141,7 +1141,7 @@ static int get_page_and_type_from_mfn(
 define_get_linear_pagetable(l2);
 static int
 get_page_from_l2e(
-    l2_pgentry_t l2e, unsigned long pfn, struct domain *d, unsigned int flags)
+    l2_pgentry_t l2e, unsigned long l2mfn, struct domain *d, unsigned int flags)
 {
     unsigned long mfn = l2e_get_pfn(l2e);
     int rc;
@@ -1156,7 +1156,7 @@ get_page_from_l2e(
     ASSERT(!(flags & PTF_preemptible));
 
     rc = get_page_and_type_from_mfn(_mfn(mfn), PGT_l1_page_table, d, flags);
-    if ( unlikely(rc == -EINVAL) && get_l2_linear_pagetable(l2e, pfn, d) )
+    if ( unlikely(rc == -EINVAL) && get_l2_linear_pagetable(l2e, l2mfn, d) )
         rc = 0;
 
     return rc;
@@ -1165,7 +1165,7 @@ get_page_from_l2e(
 define_get_linear_pagetable(l3);
 static int
 get_page_from_l3e(
-    l3_pgentry_t l3e, unsigned long pfn, struct domain *d, unsigned int flags)
+    l3_pgentry_t l3e, unsigned long l3mfn, struct domain *d, unsigned int flags)
 {
     int rc;
 
@@ -1180,7 +1180,7 @@ get_page_from_l3e(
         l3e_get_mfn(l3e), PGT_l2_page_table, d, flags | PTF_preemptible);
     if ( unlikely(rc == -EINVAL) &&
          !is_pv_32bit_domain(d) &&
-         get_l3_linear_pagetable(l3e, pfn, d) )
+         get_l3_linear_pagetable(l3e, l3mfn, d) )
         rc = 0;
 
     return rc;
@@ -1189,7 +1189,7 @@ get_page_from_l3e(
 define_get_linear_pagetable(l4);
 static int
 get_page_from_l4e(
-    l4_pgentry_t l4e, unsigned long pfn, struct domain *d, unsigned int flags)
+    l4_pgentry_t l4e, unsigned long l4mfn, struct domain *d, unsigned int flags)
 {
     int rc;
 
@@ -1202,7 +1202,7 @@ get_page_from_l4e(
 
     rc = get_page_and_type_from_mfn(
         l4e_get_mfn(l4e), PGT_l3_page_table, d, flags | PTF_preemptible);
-    if ( unlikely(rc == -EINVAL) && get_l4_linear_pagetable(l4e, pfn, d) )
+    if ( unlikely(rc == -EINVAL) && get_l4_linear_pagetable(l4e, l4mfn, d) )
         rc = 0;
 
     return rc;
@@ -1460,13 +1460,13 @@ static int create_pae_xen_mappings(struct domain *d, l3_pgentry_t *pl3e)
 static int alloc_l2_table(struct page_info *page, unsigned long type)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long pfn = mfn_x(page_to_mfn(page));
+    unsigned long l2mfn = mfn_x(page_to_mfn(page));
     l2_pgentry_t *pl2e;
     unsigned int i;
     int rc = 0;
     unsigned int partial_flags = page->partial_flags;
 
-    pl2e = map_domain_page(_mfn(pfn));
+    pl2e = map_domain_page(_mfn(l2mfn));
 
     /*
      * NB that alloc_l2_table will never set partial_pte on an l2; but
@@ -1492,7 +1492,7 @@ static int alloc_l2_table(struct page_info *page, unsigned long type)
                 rc = -EINTR;
         }
         else
-            rc = get_page_from_l2e(l2e, pfn, d, partial_flags);
+            rc = get_page_from_l2e(l2e, l2mfn, d, partial_flags);
 
         /*
          * It shouldn't be possible for get_page_from_l2e to return
@@ -1559,14 +1559,14 @@ static int alloc_l2_table(struct page_info *page, unsigned long type)
 static int alloc_l3_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long pfn = mfn_x(page_to_mfn(page));
+    unsigned long l3mfn = mfn_x(page_to_mfn(page));
     l3_pgentry_t *pl3e;
     unsigned int i;
     int rc = 0;
     unsigned int partial_flags = page->partial_flags;
     l3_pgentry_t l3e = l3e_empty();
 
-    pl3e = map_domain_page(_mfn(pfn));
+    pl3e = map_domain_page(_mfn(l3mfn));
 
     /*
      * PAE guests allocate full pages, but aren't required to initialize
@@ -1603,7 +1603,7 @@ static int alloc_l3_table(struct page_info *page)
                 rc = -EINTR;
         }
         else
-            rc = get_page_from_l3e(l3e, pfn, d,
+            rc = get_page_from_l3e(l3e, l3mfn, d,
                                    partial_flags | PTF_retain_ref_on_restart);
 
         if ( rc == -ERESTART )
@@ -1786,8 +1786,8 @@ void zap_ro_mpt(mfn_t mfn)
 static int alloc_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long pfn = mfn_x(page_to_mfn(page));
-    l4_pgentry_t *pl4e = map_domain_page(_mfn(pfn));
+    unsigned long l4mfn = mfn_x(page_to_mfn(page));
+    l4_pgentry_t *pl4e = map_domain_page(_mfn(l4mfn));
     unsigned int i;
     int rc = 0;
     unsigned int partial_flags = page->partial_flags;
@@ -1809,7 +1809,7 @@ static int alloc_l4_table(struct page_info *page)
                 rc = -EINTR;
         }
         else
-            rc = get_page_from_l4e(l4e, pfn, d,
+            rc = get_page_from_l4e(l4e, l4mfn, d,
                                    partial_flags | PTF_retain_ref_on_restart);
 
         if ( rc == -ERESTART )
@@ -1869,7 +1869,7 @@ static int alloc_l4_table(struct page_info *page)
 
     if ( !rc )
     {
-        init_xen_l4_slots(pl4e, _mfn(pfn),
+        init_xen_l4_slots(pl4e, _mfn(l4mfn),
                           d, INVALID_MFN, VM_ASSIST(d, m2p_strict));
         atomic_inc(&d->arch.pv.nr_l4_pages);
     }
@@ -1896,18 +1896,18 @@ static void free_l1_table(struct page_info *page)
 static int free_l2_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long pfn = mfn_x(page_to_mfn(page));
+    unsigned long l2mfn = mfn_x(page_to_mfn(page));
     l2_pgentry_t *pl2e;
     int rc = 0;
     unsigned int partial_flags = page->partial_flags,
         i = page->nr_validated_ptes - !(partial_flags & PTF_partial_set);
 
-    pl2e = map_domain_page(_mfn(pfn));
+    pl2e = map_domain_page(_mfn(l2mfn));
 
     for ( ; ; )
     {
         if ( is_guest_l2_slot(d, page->u.inuse.type_info, i) )
-            rc = put_page_from_l2e(pl2e[i], pfn, partial_flags);
+            rc = put_page_from_l2e(pl2e[i], l2mfn, partial_flags);
 
         if ( rc < 0 )
             break;
@@ -1948,17 +1948,17 @@ static int free_l2_table(struct page_info *page)
 static int free_l3_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long pfn = mfn_x(page_to_mfn(page));
+    unsigned long l3mfn = mfn_x(page_to_mfn(page));
     l3_pgentry_t *pl3e;
     int rc = 0;
     unsigned int partial_flags = page->partial_flags,
         i = page->nr_validated_ptes - !(partial_flags & PTF_partial_set);
 
-    pl3e = map_domain_page(_mfn(pfn));
+    pl3e = map_domain_page(_mfn(l3mfn));
 
     for ( ; ; )
     {
-        rc = put_page_from_l3e(pl3e[i], pfn, partial_flags);
+        rc = put_page_from_l3e(pl3e[i], l3mfn, partial_flags);
 
         if ( rc < 0 )
             break;
@@ -1995,15 +1995,15 @@ static int free_l3_table(struct page_info *page)
 static int free_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long pfn = mfn_x(page_to_mfn(page));
-    l4_pgentry_t *pl4e = map_domain_page(_mfn(pfn));
+    unsigned long l4mfn = mfn_x(page_to_mfn(page));
+    l4_pgentry_t *pl4e = map_domain_page(_mfn(l4mfn));
     int rc = 0;
     unsigned partial_flags = page->partial_flags,
         i = page->nr_validated_ptes - !(partial_flags & PTF_partial_set);
 
     do {
         if ( is_guest_l4_slot(d, i) )
-            rc = put_page_from_l4e(pl4e[i], pfn, partial_flags);
+            rc = put_page_from_l4e(pl4e[i], l4mfn, partial_flags);
         if ( rc < 0 )
             break;
         partial_flags = 0;
-- 
2.24.0

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel