Re: [Xen-devel] [PATCH v4 8/9] libxc: rework of domain builder's page table handler
On Thu, Nov 05, 2015 at 03:36:34PM +0100, Juergen Gross wrote:
> In order to prepare a p2m list outside of the initial kernel mapping,
> rework the domain builder's page table handler. The goal is to be able
> to use common helpers for page table allocation and setup, both for the
> initial kernel page tables and for the page tables mapping the p2m list.
> This is achieved by supporting multiple mapping areas. The mapped
> virtual addresses of the individual areas must not overlap, while the
> page tables of a newly added area might already be partially present.
> In particular, the top level page table exists only once, of course.
>
> Currently the number of mappings is restricted to 1, because the only
> mapping for now is the initial mapping created by the toolstack. No
> behavioural or guest visible change is introduced.
>
> Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Some comments below.
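
For readers not familiar with this code, the bookkeeping the patch
introduces can be pictured roughly as below. This is a sketch
reconstructed from the field accesses in the hunk that follows, so the
exact type names, the params struct and the MAPPING_MAX value are
illustrative only:

    /* Sketch of the mapping bookkeeping used by count_pgtables();
     * reconstructed from its field accesses, real definitions may
     * differ in detail.
     */
    #include <stdint.h>

    typedef uint64_t xen_vaddr_t;      /* stand-ins for the libxc typedefs */
    typedef uint64_t xen_pfn_t;

    #define MAX_PGTBL_LEVELS 4         /* up to 4 paging levels (x86-64)   */
    #define MAPPING_MAX      1         /* this patch allows a single area  */

    struct xc_dom_x86_mapping_lvl {
        xen_vaddr_t from;              /* first virtual address covered    */
        xen_vaddr_t to;                /* last virtual address covered     */
        xen_pfn_t pfn;                 /* pfn of first table of this level */
        unsigned int pgtables;         /* number of tables at this level   */
    };

    struct xc_dom_x86_mapping {
        struct xc_dom_x86_mapping_lvl area;                   /* whole area */
        struct xc_dom_x86_mapping_lvl lvls[MAX_PGTBL_LEVELS]; /* per level  */
    };

    struct xc_dom_image_x86 {
        unsigned int n_mappings;                     /* areas in use        */
        struct xc_dom_x86_mapping maps[MAPPING_MAX]; /* one per mapped area */
        struct xc_dom_params *params;  /* levels, vaddr_mask, ... (assumed) */
    };

The important property is that each area records, per page table level,
the virtual range it already covers, so a later area (the p2m list added
later in this series) only allocates the tables that are not yet present.
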
[...]
>
> -static unsigned long
> -nr_page_tables(struct xc_dom_image *dom,
> - xen_vaddr_t start, xen_vaddr_t end, unsigned long bits)
> +static int count_pgtables(struct xc_dom_image *dom, xen_vaddr_t from,
> + xen_vaddr_t to, xen_pfn_t pfn)
> {
> - xen_vaddr_t mask = bits_to_mask(bits);
> - int tables;
> + struct xc_dom_image_x86 *domx86 = dom->arch_private;
> + struct xc_dom_x86_mapping *map, *map_cmp;
> + xen_pfn_t pfn_end;
> + xen_vaddr_t mask;
> + unsigned bits;
> + int l, m;
>
> - if ( bits == 0 )
> - return 0; /* unused */
> + if ( domx86->n_mappings == MAPPING_MAX )
> + {
> + xc_dom_panic(dom->xch, XC_OUT_OF_MEMORY,
> + "%s: too many mappings\n", __FUNCTION__);
> + return -ENOMEM;
> + }
> + map = domx86->maps + domx86->n_mappings;
>
> - if ( bits == (8 * sizeof(unsigned long)) )
> + pfn_end = pfn + ((to - from) >> PAGE_SHIFT_X86);
> + if ( pfn_end >= dom->p2m_size )
> {
> - /* must be pgd, need one */
> - start = 0;
> - end = -1;
> - tables = 1;
> + xc_dom_panic(dom->xch, XC_OUT_OF_MEMORY,
> + "%s: not enough memory for initial mapping (%#"PRIpfn"
> > %#"PRIpfn")",
> + __FUNCTION__, pfn_end, dom->p2m_size);
> + return -ENOMEM;
> }
> - else
> + for ( m = 0; m < domx86->n_mappings; m++ )
> {
> - start = round_down(start, mask);
> - end = round_up(end, mask);
> - tables = ((end - start) >> bits) + 1;
> + map_cmp = domx86->maps + m;
> + if ( from < map_cmp->area.to && to > map_cmp->area.from )
> + {
> + xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
> + "%s: overlapping mappings\n", __FUNCTION__);
> + return -1;
Please use -EINVAL.
> + }
> }
>
> - DOMPRINTF("%s: 0x%016" PRIx64 "/%ld: 0x%016" PRIx64
> - " -> 0x%016" PRIx64 ", %d table(s)",
> - __FUNCTION__, mask, bits, start, end, tables);
> - return tables;
> + memset(map, 0, sizeof(*map));
> + map->area.from = from & domx86->params->vaddr_mask;
> + map->area.to = to & domx86->params->vaddr_mask;
> +
> + for ( l = domx86->params->levels - 1; l >= 0; l-- )
> + {
> + map->lvls[l].pfn = pfn + map->area.pgtables;
> + if ( l == domx86->params->levels - 1 )
> + {
> + if ( domx86->n_mappings == 0 )
> + {
> + map->lvls[l].from = 0;
> + map->lvls[l].to = domx86->params->vaddr_mask;
> + map->lvls[l].pgtables = 1;
> + map->area.pgtables++;
> + }
> + continue;
> + }
Could use some comments before this hunk. Like "only one top-level
mapping is required".
> +
> + bits = PAGE_SHIFT_X86 + (l + 1) * PGTBL_LEVEL_SHIFT_X86;
> + mask = bits_to_mask(bits);
> + map->lvls[l].from = map->area.from & ~mask;
> + map->lvls[l].to = map->area.to | mask;
> +
> + if ( domx86->params->levels == 3 && domx86->n_mappings == 0 &&
Please use PGTBL_LEVELS_I386.
> + to < 0xc0000000 && l == 1 )
> + {
> + DOMPRINTF("%s: PAE: extra l2 page table for l3#3", __FUNCTION__);
> + map->lvls[l].to = domx86->params->vaddr_mask;
> + }
> +
> + for ( m = 0; m < domx86->n_mappings; m++ )
> + {
> + map_cmp = domx86->maps + m;
> + if ( map_cmp->lvls[l].from == map_cmp->lvls[l].to )
> + continue;
> + if ( map->lvls[l].from >= map_cmp->lvls[l].from &&
> + map->lvls[l].to <= map_cmp->lvls[l].to )
> + {
> + map->lvls[l].from = 0;
> + map->lvls[l].to = 0;
> + break;
> + }
> + assert(map->lvls[l].from >= map_cmp->lvls[l].from ||
> + map->lvls[l].to <= map_cmp->lvls[l].to);
> + if ( map->lvls[l].from >= map_cmp->lvls[l].from &&
> + map->lvls[l].from <= map_cmp->lvls[l].to )
> + map->lvls[l].from = map_cmp->lvls[l].to + 1;
> + if ( map->lvls[l].to >= map_cmp->lvls[l].from &&
> + map->lvls[l].to <= map_cmp->lvls[l].to )
> + map->lvls[l].to = map_cmp->lvls[l].from - 1;
> + }
> + if ( map->lvls[l].from < map->lvls[l].to )
> + map->lvls[l].pgtables =
> + ((map->lvls[l].to - map->lvls[l].from) >> bits) + 1;
> + DOMPRINTF("%s: 0x%016" PRIx64 "/%d: 0x%016" PRIx64 " -> 0x%016"
> PRIx64
> + ", %d table(s)", __FUNCTION__, mask, bits,
> + map->lvls[l].from, map->lvls[l].to, map->lvls[l].pgtables);
> + map->area.pgtables += map->lvls[l].pgtables;
> + }
> +
> + return 0;
> }
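
For anyone checking the arithmetic: at level l the code uses
bits = PAGE_SHIFT_X86 + (l + 1) * PGTBL_LEVEL_SHIFT_X86, so one page
table at that level covers a (1 << bits) byte aligned block, and the
table count for a range is just the number of such blocks the range
touches. A small standalone check of that formula (the constants 12 and
9 are the usual 4k page / 512 entry values and are assumed here):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT_X86        12   /* 4 KiB pages (assumed)      */
    #define PGTBL_LEVEL_SHIFT_X86  9   /* 512 entries per page table */

    /* Number of page tables needed at level l to map [from, to]
     * (inclusive), mirroring the expression used in count_pgtables().
     */
    static unsigned int tables_at_level(uint64_t from, uint64_t to, int l)
    {
        unsigned int bits = PAGE_SHIFT_X86 + (l + 1) * PGTBL_LEVEL_SHIFT_X86;
        uint64_t mask = ((uint64_t)1 << bits) - 1;

        from &= ~mask;   /* round down to the block covered by one table */
        to |= mask;      /* round up (inclusive) to the end of its block */
        return ((to - from) >> bits) + 1;
    }

    int main(void)
    {
        /* [1 MiB, 5 MiB - 1]: each lowest-level table (l == 0) maps
         * 2 MiB, and the range touches the blocks at 0, 2M and 4M.
         */
        assert(tables_at_level(0x100000, 0x4fffff, 0) == 3);
        /* The same range fits in one table at the next level up
         * (l == 1, 1 GiB per table).
         */
        assert(tables_at_level(0x100000, 0x4fffff, 1) == 1);
        printf("page table counts check out\n");
        return 0;
    }
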
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel