
Re: [Xen-devel] [PATCH for-next v2 09/10] x86/domain: move PV specific code to pv/domain.c



On 25/04/17 14:52, Wei Liu wrote:
> - fail:
> -    pv_domain_destroy(d);
> -
> -    return rc;
> -}
> -
> +void paravirt_ctxt_switch_from(struct vcpu *v);
> +void paravirt_ctxt_switch_to(struct vcpu *v);
>  int arch_domain_create(struct domain *d, unsigned int domcr_flags,
>                         struct xen_arch_domainconfig *config)
>  {
> @@ -1919,7 +1717,8 @@ static void save_segments(struct vcpu *v)
>  
>  #define switch_kernel_stack(v) ((void)0)
>  
> -static void paravirt_ctxt_switch_from(struct vcpu *v)
> +/* Needed by PV guests */
> +void paravirt_ctxt_switch_from(struct vcpu *v)
>  {

Could these be moved up to avoid the forward declarations above?
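
Something like this, perhaps (sketch only, assuming nothing else
depends on the current placement):

    /* Defined ahead of arch_domain_create(), their first caller, so
     * the prototypes can go. */
    void paravirt_ctxt_switch_from(struct vcpu *v)
    {
        /* ... existing body, moved unchanged ... */
    }

    void paravirt_ctxt_switch_to(struct vcpu *v)
    {
        /* ... existing body, moved unchanged ... */
    }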

> diff --git a/xen/arch/x86/pv/Makefile b/xen/arch/x86/pv/Makefile
> index ea94599438..2737824e81 100644
> --- a/xen/arch/x86/pv/Makefile
> +++ b/xen/arch/x86/pv/Makefile
> @@ -1,2 +1,3 @@
>  obj-y += hypercall.o
>  obj-bin-y += dom0_build.init.o
> +obj-y += domain.o
> diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
> new file mode 100644
> index 0000000000..562c3d03f5
> --- /dev/null
> +++ b/xen/arch/x86/pv/domain.c
> @@ -0,0 +1,232 @@
> +/******************************************************************************
> + * arch/x86/pv/domain.c
> + *
> + * PV domain handling
> + */
> +
> +/*
> + *  Copyright (C) 1995  Linus Torvalds
> + *
> + *  Pentium III FXSR, SSE support
> + *  Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
> + */
> +
> +#include <xen/domain_page.h>
> +#include <xen/errno.h>
> +#include <xen/lib.h>
> +#include <xen/sched.h>
> +
> +static void noreturn continue_nonidle_domain(struct vcpu *v)
> +{
> +    check_wakeup_from_wait();
> +    mark_regs_dirty(guest_cpu_user_regs());
> +    reset_stack_and_jump(ret_from_intr);
> +}
> +
> +static int setup_compat_l4(struct vcpu *v)
> +{
> +    struct page_info *pg;
> +    l4_pgentry_t *l4tab;
> +
> +    pg = alloc_domheap_page(v->domain, MEMF_no_owner);
> +    if ( pg == NULL )
> +        return -ENOMEM;
> +
> +    /* This page needs to look like a pagetable so that it can be shadowed */

Full stop at the end of the comment, please.

> +    pg->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;

More spaces please.
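
i.e.:

    pg->u.inuse.type_info = PGT_l4_page_table | PGT_validated | 1;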

> +
> +    l4tab = __map_domain_page(pg);
> +    clear_page(l4tab);

I know this patch is only code motion, but I really think it would be
safer to defer the type_info update until after we have cleared the page.
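
Something along these lines, perhaps (sketch only; the ordering is
the point):

    l4tab = __map_domain_page(pg);
    clear_page(l4tab);
    init_guest_l4_table(l4tab, v->domain, 1);
    unmap_domain_page(l4tab);

    /* Only now does the page look like a validated L4 pagetable. */
    pg->u.inuse.type_info = PGT_l4_page_table | PGT_validated | 1;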

> +    init_guest_l4_table(l4tab, v->domain, 1);
> +    unmap_domain_page(l4tab);
> +
> +    v->arch.guest_table = pagetable_from_page(pg);
> +    v->arch.guest_table_user = v->arch.guest_table;
> +
> +    return 0;
> +}
> +
> +static void release_compat_l4(struct vcpu *v)
> +{
> +    if ( !pagetable_is_null(v->arch.guest_table) )
> +        free_domheap_page(pagetable_get_page(v->arch.guest_table));
> +    v->arch.guest_table = pagetable_null();
> +    v->arch.guest_table_user = pagetable_null();
> +}
> +
> +int switch_compat(struct domain *d)
> +{
> +    struct vcpu *v;
> +    int rc;
> +
> +    if ( is_hvm_domain(d) || d->tot_pages != 0 )
> +        return -EACCES;
> +    if ( is_pv_32bit_domain(d) )
> +        return 0;
> +
> +    d->arch.has_32bit_shinfo = 1;
> +    if ( is_pv_domain(d) )
> +        d->arch.is_32bit_pv = 1;

This is_pv_domain() is redundant.  I expect this was fallout from
ripping PVH out.
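
i.e. (sketch):

    /* is_hvm_domain() was rejected above, so this must be a PV domain. */
    d->arch.has_32bit_shinfo = 1;
    d->arch.is_32bit_pv = 1;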

> +
> +    for_each_vcpu( d, v )
> +    {
> +        rc = setup_compat_arg_xlat(v);
> +        if ( !rc )
> +            rc = setup_compat_l4(v);
> +
> +        if ( rc )
> +            goto undo_and_fail;

This is odd structuring.  Care to rearrange it with two gotos, rather
than inverting the first rc check?
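
i.e. (sketch):

    for_each_vcpu( d, v )
    {
        rc = setup_compat_arg_xlat(v);
        if ( rc )
            goto undo_and_fail;

        rc = setup_compat_l4(v);
        if ( rc )
            goto undo_and_fail;
    }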

> +    }
> +
> +    domain_set_alloc_bitsize(d);
> +    recalculate_cpuid_policy(d);
> +
> +    d->arch.x87_fip_width = 4;
> +
> +    return 0;
> +
> + undo_and_fail:
> +    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
> +    for_each_vcpu( d, v )
> +    {
> +        free_compat_arg_xlat(v);
> +        release_compat_l4(v);
> +    }
> +
> +    return rc;
> +}
> +
> +static int pv_create_gdt_ldt_l1tab(struct domain *d, struct vcpu *v)
> +{
> +    return create_perdomain_mapping(d, GDT_VIRT_START(v),
> +                                    1 << GDT_LDT_VCPU_SHIFT,

This should be 1u, when introduced in patch 1.

> +                                    d->arch.pv_domain.gdt_ldt_l1tab, NULL);
> +}
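
i.e. (sketch, with just the literal adjusted):

    return create_perdomain_mapping(d, GDT_VIRT_START(v),
                                    1u << GDT_LDT_VCPU_SHIFT,
                                    d->arch.pv_domain.gdt_ldt_l1tab, NULL);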
> +
> +static void pv_destroy_gdt_ldt_l1tab(struct domain *d, struct vcpu *v)
> +{
> +    destroy_perdomain_mapping(d, GDT_VIRT_START(v), 1 << GDT_LDT_VCPU_SHIFT);
> +}
> +
> +void pv_vcpu_destroy(struct vcpu *v)
> +{
> +    if ( is_pv_32bit_vcpu(v) )
> +    {
> +        free_compat_arg_xlat(v);
> +        release_compat_l4(v);
> +    }
> +
> +    pv_destroy_gdt_ldt_l1tab(v->domain, v);
> +    xfree(v->arch.pv_vcpu.trap_ctxt);
> +    v->arch.pv_vcpu.trap_ctxt = NULL;
> +}
> +
> +int pv_vcpu_initialise(struct vcpu *v)
> +{
> +    struct domain *d = v->domain;
> +    int rc;
> +
> +    ASSERT(!is_idle_domain(d));
> +
> +    spin_lock_init(&v->arch.pv_vcpu.shadow_ldt_lock);
> +
> +    rc = pv_create_gdt_ldt_l1tab(d, v);
> +    if ( rc )
> +        goto done;
> +
> +    BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt) >
> +                 PAGE_SIZE);
> +    v->arch.pv_vcpu.trap_ctxt = xzalloc_array(struct trap_info,
> +                                              NR_VECTORS);
> +    if ( !v->arch.pv_vcpu.trap_ctxt )
> +    {
> +        rc = -ENOMEM;
> +        goto done;
> +    }
> +
> +    /* PV guests by default have a 100Hz ticker. */
> +    v->periodic_period = MILLISECS(10);
> +
> +    v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
> +
> +    if ( is_pv_32bit_domain(d) )
> +    {
> +        if ( (rc = setup_compat_arg_xlat(v)) )
> +            goto done;
> +
> +        if ( (rc = setup_compat_l4(v)) )
> +            goto done;
> +    }

Now that the code is split apart like this, this construct looks
suspicious.  I suppose it probably copes with the toolstack issuing
XEN_DOMCTL_set_address_size and XEN_DOMCTL_max_vcpus in either order.

> +
> + done:
> +    if ( rc )
> +        pv_vcpu_destroy(v);
> +    return rc;
> +}
> +
> +void pv_domain_destroy(struct domain *d)
> +{
> +    destroy_perdomain_mapping(d, GDT_LDT_VIRT_START,
> +                              GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
> +    xfree(d->arch.pv_domain.cpuidmasks);
> +    d->arch.pv_domain.cpuidmasks = NULL;
> +    free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
> +    d->arch.pv_domain.gdt_ldt_l1tab = NULL;
> +}
> +
> +
> +extern void paravirt_ctxt_switch_from(struct vcpu *v);
> +extern void paravirt_ctxt_switch_to(struct vcpu *v);
> +
> +int pv_domain_initialise(struct domain *d, unsigned int domcr_flags,
> +                         struct xen_arch_domainconfig *config)
> +{
> +    static const struct arch_csw pv_csw = {
> +        .from = paravirt_ctxt_switch_from,
> +        .to   = paravirt_ctxt_switch_to,
> +        .tail = continue_nonidle_domain,
> +    };
> +    int rc = -ENOMEM;
> +
> +    d->arch.pv_domain.gdt_ldt_l1tab =
> +        alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
> +    if ( !d->arch.pv_domain.gdt_ldt_l1tab )
> +        goto fail;
> +    clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
> +
> +    if ( levelling_caps & ~LCAP_faulting )
> +    {
> +        d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
> +        if ( !d->arch.pv_domain.cpuidmasks )
> +            goto fail;
> +        *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
> +    }
> +
> +    rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
> +                                  GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
> +                                  NULL, NULL);
> +    if ( rc )
> +        goto fail;
> +
> +    d->arch.ctxt_switch = &pv_csw;
> +
> +    /* 64-bit PV guest by default. */
> +    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
> +
> +    return 0;
> +
> +  fail:
> +    pv_domain_destroy(d);
> +
> +    return rc;
> +}
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * tab-width: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/xen/include/asm-x86/pv/domain.h b/xen/include/asm-x86/pv/domain.h
> new file mode 100644
> index 0000000000..6288ae24df
> --- /dev/null
> +++ b/xen/include/asm-x86/pv/domain.h
> @@ -0,0 +1,30 @@
> +/*
> + * pv/domain.h
> + *
> + * PV guest interface definitions
> + *
> + * Copyright (C) 2017 Wei Liu <wei.liu2@xxxxxxxxxx>
> + *
> + * This program is free software; you can redistribute it and/or
> + * modify it under the terms and conditions of the GNU General Public
> + * License, version 2, as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public
> + * License along with this program; If not, see
> + * <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef __X86_PV_DOMAIN_H__
> +#define __X86_PV_DOMAIN_H__
> +

#ifdef CONFIG_PV

> +void pv_vcpu_destroy(struct vcpu *v);
> +int pv_vcpu_initialise(struct vcpu *v);
> +void pv_domain_destroy(struct domain *d);
> +int pv_domain_initialise(struct domain *d, unsigned int domcr_flags,
> +                         struct xen_arch_domainconfig *config);

#else

static inline void pv_vcpu_destroy(struct vcpu *v) {}
static inline int pv_vcpu_initialise(struct vcpu *v) { return -EOPNOTSUPP; }
static inline void pv_domain_destroy(struct domain *d) {}
static inline int pv_domain_initialise(struct domain *d,
                                       unsigned int domcr_flags,
                                       struct xen_arch_domainconfig *config)
{
    return -EOPNOTSUPP;
}

#endif

Please can we try to make new code compatible with eventually turning
off CONFIG_PV and CONFIG_HVM.

~Andrew

> +
> +#endif       /* __X86_PV_DOMAIN_H__ */

