
Re: [Xen-devel] [RFC PATCH 1/8] x86/domctl: introduce a pair of hypercalls to set and get cpu topology



On Mon, Jan 08, 2018 at 01:14:44PM -0500, Daniel De Graaf wrote:
>On 01/07/2018 11:01 PM, Chao Gao wrote:
>> Define the interface, structures and hypercalls for the toolstack to
>> build the cpu topology and for the guest to retrieve it [1].
>> Two subop hypercalls are introduced by this patch:
>> XEN_DOMCTL_set_cpu_topology to set the cpu topology information per domain,
>> and XENMEM_get_cpu_topology to retrieve it.
>> 
>> [1]: during guest creation, this information helps hvmloader build the ACPI tables.
>> 
>> Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
>
>When adding new XSM controls for use by device models, you also
>need to add the permissions to the device_model macro defined in
>tools/flask/policy/modules/xen.if.  If domains need to call this
>function on themselves (is this only true for get?), you will also
>need to add it to declare_domain_common.
>

Hi, Daniel.

Yes. XENMEM_get_cpu_topology will be called by the domain itself,
and both get and set will be called by dom0 when creating a domain.
So I need to:
1. add *set* and *get* to create_domain_common.
2. add *get* to declare_domain_common.

Is that right?
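
To make that concrete, the change to tools/flask/policy/modules/xen.if
I have in mind looks roughly like this (a sketch only; the existing
rules in each macro are elided):

define(`create_domain_common', `
    # ... existing rules ...
    allow $1 $2:domain2 { set_cpu_topology get_cpu_topology };
')

define(`declare_domain_common', `
    # ... existing rules ...
    allow $1 $2:domain2 get_cpu_topology;
')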

Thanks
Chao

>> ---
>>   xen/arch/x86/domctl.c               | 27 ++++++++++++++++++++++
>>   xen/arch/x86/hvm/hvm.c              |  7 ++++++
>>   xen/arch/x86/mm.c                   | 45 +++++++++++++++++++++++++++++++++++++
>>   xen/include/asm-x86/hvm/domain.h    | 15 +++++++++++++
>>   xen/include/public/domctl.h         | 22 ++++++++++++++++++
>>   xen/include/public/memory.h         | 27 +++++++++++++++++++++-
>>   xen/include/xsm/dummy.h             |  6 +++++
>>   xen/xsm/dummy.c                     |  1 +
>>   xen/xsm/flask/hooks.c               | 10 +++++++++
>>   xen/xsm/flask/policy/access_vectors |  4 ++++
>>   10 files changed, 163 insertions(+), 1 deletion(-)
>> 
>> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
>> index 36ab235..4e1bbd5 100644
>> --- a/xen/arch/x86/domctl.c
>> +++ b/xen/arch/x86/domctl.c
>> @@ -347,6 +347,29 @@ void arch_get_domain_info(const struct domain *d,
>>           info->flags |= XEN_DOMINF_hap;
>>   }
>> +static int arch_set_cpu_topology(struct domain *d,
>> +                                 struct xen_domctl_cpu_topology *topology)
>> +{
>> +    if ( !is_hvm_domain(d) ||
>> +         !topology->size || topology->size > HVM_MAX_VCPUS )
>> +        return -EINVAL;
>> +
>> +    if ( !d->arch.hvm_domain.apic_id )
>> +        d->arch.hvm_domain.apic_id = xmalloc_array(uint32_t, topology->size);
>> +
>> +    if ( !d->arch.hvm_domain.apic_id )
>> +        return -ENOMEM;
>> +
>> +    if ( copy_from_guest(d->arch.hvm_domain.apic_id, topology->tid,
>> +                         topology->size) )
>> +        return -EFAULT;
>> +
>> +    d->arch.hvm_domain.apic_id_size = topology->size;
>> +    d->arch.hvm_domain.core_per_socket = topology->core_per_socket;
>> +    d->arch.hvm_domain.thread_per_core = topology->thread_per_core;
>> +    return 0;
>> +}
>> +
>>   #define MAX_IOPORTS 0x10000
>>   long arch_do_domctl(
>> @@ -1555,6 +1578,10 @@ long arch_do_domctl(
>>           recalculate_cpuid_policy(d);
>>           break;
>> +    case XEN_DOMCTL_set_cpu_topology:
>> +        ret = arch_set_cpu_topology(d, &domctl->u.cpu_topology);
>> +        break;
>> +
>>       default:
>>           ret = iommu_do_domctl(domctl, d, u_domctl);
>>           break;
>> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
>> index 71fddfd..b3b3224 100644
>> --- a/xen/arch/x86/hvm/hvm.c
>> +++ b/xen/arch/x86/hvm/hvm.c
>> @@ -1509,6 +1509,13 @@ int hvm_vcpu_initialise(struct vcpu *v)
>>       int rc;
>>       struct domain *d = v->domain;
>> +    if ( v->vcpu_id >= d->arch.hvm_domain.apic_id_size )
>> +    {
>> +        printk(XENLOG_ERR "d%dv%d's apic id isn't set.\n",
>> +               d->domain_id, v->vcpu_id);
>> +        return -ENOENT;
>> +    }
>> +
>>       hvm_asid_flush_vcpu(v);
>>       spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
>> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
>> index a56f875..b90e663 100644
>> --- a/xen/arch/x86/mm.c
>> +++ b/xen/arch/x86/mm.c
>> @@ -4413,6 +4413,51 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>>           return rc;
>>       }
>> +    case XENMEM_get_cpu_topology:
>> +    {
>> +        struct domain *d;
>> +        struct xen_cpu_topology_info topology;
>> +
>> +        if ( copy_from_guest(&topology, arg, 1) )
>> +            return -EFAULT;
>> +
>> +        if ( topology.pad || topology.pad2 )
>> +            return -EINVAL;
>> +
>> +        if ( (d = rcu_lock_domain_by_any_id(topology.domid)) == NULL )
>> +            return -ESRCH;
>> +
>> +        rc = xsm_get_cpu_topology(XSM_TARGET, d);
>> +        if ( rc )
>> +            goto get_cpu_topology_failed;
>> +
>> +        rc = -EOPNOTSUPP;
>> +        if ( !is_hvm_domain(d) || !d->arch.hvm_domain.apic_id )
>> +            goto get_cpu_topology_failed;
>> +
>> +        /* Allow the size to be zero for callers that don't care about apic_id. */
>> +        if ( topology.size )
>> +        {
>> +            rc = -E2BIG;
>> +            if ( topology.size != d->arch.hvm_domain.apic_id_size )
>> +                goto get_cpu_topology_failed;
>> +
>> +            rc = -EFAULT;
>> +            if ( copy_to_guest(topology.tid.h, d->arch.hvm_domain.apic_id,
>> +                               topology.size) )
>> +                goto get_cpu_topology_failed;
>> +        }
>> +
>> +        topology.core_per_socket = d->arch.hvm_domain.core_per_socket;
>> +        topology.thread_per_core = d->arch.hvm_domain.thread_per_core;
>> +
>> +        rc = __copy_to_guest(arg, &topology, 1) ? -EFAULT : 0;
>> +
>> + get_cpu_topology_failed:
>> +        rcu_unlock_domain(d);
>> +        return rc;
>> +    }
>> +
>>       default:
>>           return subarch_memory_op(cmd, arg);
>>       }
>> diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
>> index 7f128c0..501ed99 100644
>> --- a/xen/include/asm-x86/hvm/domain.h
>> +++ b/xen/include/asm-x86/hvm/domain.h
>> @@ -196,6 +196,21 @@ struct hvm_domain {
>>           struct vmx_domain vmx;
>>           struct svm_domain svm;
>>       };
>> +
>> +    /*
>> +     * An array of APIC IDs, each unique, from which the socket ID,
>> +     * core ID and thread ID can be extracted.
>> +     */
>> +    uint32_t *apic_id;
>> +    uint32_t apic_id_size;
>> +
>> +    /*
>> +     * The number of cores per socket and threads per core; they determine
>> +     * the right-shift widths used to extract the core and thread IDs from
>> +     * an apic_id (defined above).
>> +     */
>> +    uint8_t core_per_socket;
>> +    uint8_t thread_per_core;
>>   };
>>   #define hap_enabled(d)  ((d)->arch.hvm_domain.hap_enabled)
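
As a note on how these fields are meant to be consumed (a sketch only,
not part of the patch; it assumes the usual x86 convention that each
topology level occupies a power-of-two-wide bit field of the APIC ID):

#include <stdint.h>

/* Number of bits needed to hold 'count' distinct values. */
static unsigned int field_width(unsigned int count)
{
    unsigned int width = 0;

    while ( (1u << width) < count )
        width++;

    return width;
}

/* Split an APIC ID into socket/core/thread IDs using the counts stored
 * alongside the apic_id array. */
static void decompose_apic_id(uint32_t apic_id,
                              unsigned int core_per_socket,
                              unsigned int thread_per_core,
                              unsigned int *socket,
                              unsigned int *core,
                              unsigned int *thread)
{
    unsigned int thread_bits = field_width(thread_per_core);
    unsigned int core_bits = field_width(core_per_socket);

    *thread = apic_id & ((1u << thread_bits) - 1);
    *core = (apic_id >> thread_bits) & ((1u << core_bits) - 1);
    *socket = apic_id >> (thread_bits + core_bits);
}
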
>> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
>> index 9ae72959..99392b7 100644
>> --- a/xen/include/public/domctl.h
>> +++ b/xen/include/public/domctl.h
>> @@ -1109,6 +1109,26 @@ struct xen_domctl_vuart_op {
>>                                    */
>>   };
>> +/* XEN_DOMCTL_set_cpu_topology */
>> +struct xen_domctl_cpu_topology {
>> +    /* IN - size of 'topology' array */
>> +    uint32_t size;
>> +    /* IN - the number of cores in the same socket */
>> +    uint8_t core_per_socket;
>> +    /* IN - the number of threads in the same core */
>> +    uint8_t thread_per_core;
>> +    /* IN - should be 0 */
>> +    uint8_t pad[2];
>> +    /*
>> +     * IN - an array of topology IDs (tid), used to compute a given vcpu's
>> +     * core ID and thread ID. For x86, the topology ID is the APIC ID,
>> +     * which is unique system-wide.
>> +     */
>> +    XEN_GUEST_HANDLE_64(uint32) tid;
>> +};
>> +typedef struct xen_domctl_cpu_topology xen_domctl_cpu_topology;
>> +DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpu_topology);
>> +
>>   struct xen_domctl {
>>       uint32_t cmd;
>>   #define XEN_DOMCTL_createdomain                   1
>> @@ -1188,6 +1208,7 @@ struct xen_domctl {
>>   #define XEN_DOMCTL_soft_reset                    79
>>   #define XEN_DOMCTL_set_gnttab_limits             80
>>   #define XEN_DOMCTL_vuart_op                      81
>> +#define XEN_DOMCTL_set_cpu_topology              82
>>   #define XEN_DOMCTL_gdbsx_guestmemio            1000
>>   #define XEN_DOMCTL_gdbsx_pausevcpu             1001
>>   #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
>> @@ -1252,6 +1273,7 @@ struct xen_domctl {
>>           struct xen_domctl_psr_alloc         psr_alloc;
>>           struct xen_domctl_set_gnttab_limits set_gnttab_limits;
>>           struct xen_domctl_vuart_op          vuart_op;
>> +        struct xen_domctl_cpu_topology      cpu_topology;
>>           uint8_t                             pad[128];
>>       } u;
>>   };
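
For reference, a toolstack-side wrapper could look roughly like the
following (a sketch only; xc_domain_set_cpu_topology is a hypothetical
name, not part of this series, though the bounce-buffer idiom is the
usual libxc one):

int xc_domain_set_cpu_topology(xc_interface *xch, uint32_t domid,
                               uint32_t *tid, uint32_t size,
                               uint8_t core_per_socket,
                               uint8_t thread_per_core)
{
    int rc;
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BOUNCE(tid, size * sizeof(*tid),
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);

    /* Make the tid array accessible to the hypervisor. */
    if ( xc_hypercall_bounce_pre(xch, tid) )
        return -1;

    domctl.cmd = XEN_DOMCTL_set_cpu_topology;
    domctl.domain = domid;
    domctl.u.cpu_topology.size = size;
    domctl.u.cpu_topology.core_per_socket = core_per_socket;
    domctl.u.cpu_topology.thread_per_core = thread_per_core;
    set_xen_guest_handle(domctl.u.cpu_topology.tid, tid);

    rc = do_domctl(xch, &domctl);

    xc_hypercall_bounce_post(xch, tid);

    return rc;
}
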
>> diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
>> index 29386df..a6bcc64 100644
>> --- a/xen/include/public/memory.h
>> +++ b/xen/include/public/memory.h
>> @@ -650,7 +650,32 @@ struct xen_vnuma_topology_info {
>>   typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
>>   DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
>> -/* Next available subop number is 28 */
>> +/*
>> + * XENMEM_get_cpu_topology is used by a guest to acquire its vcpu topology
>> + * from the hypervisor.
>> + */
>> +#define XENMEM_get_cpu_topology     28
>> +
>> +struct xen_cpu_topology_info {
>> +    /* IN */
>> +    domid_t domid;
>> +    uint16_t pad;
>> +
>> +    /* IN/OUT */
>> +    uint32_t size;
>> +
>> +    /* OUT */
>> +    uint8_t core_per_socket;
>> +    uint8_t thread_per_core;
>> +    uint16_t pad2;
>> +
>> +    union {
>> +        XEN_GUEST_HANDLE(uint32) h;
>> +        uint64_t pad;
>> +    } tid;
>> +};
>> +typedef struct xen_cpu_topology_info xen_cpu_topology_info_t;
>> +DEFINE_XEN_GUEST_HANDLE(xen_cpu_topology_info_t);
>>   #endif /* __XEN_PUBLIC_MEMORY_H__ */
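
On the guest side, hvmloader would use the new subop along these lines
(again a sketch; it assumes hvmloader's hypercall_memory_op wrapper and
the usual guest-handle macros):

/* Retrieve our own vcpu topology from inside the guest. */
static int get_my_cpu_topology(uint32_t *apic_ids, uint32_t nr,
                               uint8_t *core_per_socket,
                               uint8_t *thread_per_core)
{
    struct xen_cpu_topology_info info = {
        .domid = DOMID_SELF,
        .size = nr, /* must match what the domctl set, or be 0 */
    };
    int rc;

    set_xen_guest_handle(info.tid.h, apic_ids);

    rc = hypercall_memory_op(XENMEM_get_cpu_topology, &info);
    if ( rc )
        return rc;

    *core_per_socket = info.core_per_socket;
    *thread_per_core = info.thread_per_core;

    return 0;
}
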
>> diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
>> index d6ddadc..3ac59c7 100644
>> --- a/xen/include/xsm/dummy.h
>> +++ b/xen/include/xsm/dummy.h
>> @@ -330,6 +330,12 @@ static XSM_INLINE int xsm_get_vnumainfo(XSM_DEFAULT_ARG struct domain *d)
>>       return xsm_default_action(action, current->domain, d);
>>   }
>> +static XSM_INLINE int xsm_get_cpu_topology(XSM_DEFAULT_ARG struct domain *d)
>> +{
>> +    XSM_ASSERT_ACTION(XSM_TARGET);
>> +    return xsm_default_action(action, current->domain, d);
>> +}
>> +
>>   #if defined(CONFIG_HAS_PASSTHROUGH) && defined(CONFIG_HAS_PCI)
>>   static XSM_INLINE int xsm_get_device_group(XSM_DEFAULT_ARG uint32_t machine_bdf)
>>   {
>> diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
>> index 479b103..98eb86f 100644
>> --- a/xen/xsm/dummy.c
>> +++ b/xen/xsm/dummy.c
>> @@ -88,6 +88,7 @@ void __init xsm_fixup_ops (struct xsm_operations *ops)
>>       set_to_dummy_if_null(ops, iomem_mapping);
>>       set_to_dummy_if_null(ops, pci_config_permission);
>>       set_to_dummy_if_null(ops, get_vnumainfo);
>> +    set_to_dummy_if_null(ops, get_cpu_topology);
>>   #if defined(CONFIG_HAS_PASSTHROUGH) && defined(CONFIG_HAS_PCI)
>>       set_to_dummy_if_null(ops, get_device_group);
>> diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
>> index 19ceacf..29ee1e1 100644
>> --- a/xen/xsm/flask/hooks.c
>> +++ b/xen/xsm/flask/hooks.c
>> @@ -427,6 +427,11 @@ static int flask_get_vnumainfo(struct domain *d)
>>       return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__GET_VNUMAINFO);
>>   }
>> +static int flask_get_cpu_topology(struct domain *d)
>> +{
>> +    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__GET_CPU_TOPOLOGY);
>> +}
>> +
>>   static int flask_console_io(struct domain *d, int cmd)
>>   {
>>       u32 perm;
>> @@ -752,6 +757,9 @@ static int flask_domctl(struct domain *d, int cmd)
>>       case XEN_DOMCTL_set_gnttab_limits:
>>           return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_GNTTAB_LIMITS);
>> +    case XEN_DOMCTL_set_cpu_topology:
>> +        return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_CPU_TOPOLOGY);
>> +
>>       default:
>>           return avc_unknown_permission("domctl", cmd);
>>       }
>> @@ -1800,6 +1808,8 @@ static struct xsm_operations flask_ops = {
>>       .do_xsm_op = do_flask_op,
>>       .get_vnumainfo = flask_get_vnumainfo,
>> +    .get_cpu_topology = flask_get_cpu_topology,
>> +
>>       .vm_event_control = flask_vm_event_control,
>>   #ifdef CONFIG_HAS_MEM_ACCESS
>> diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
>> index d0a1ec5..e531ec0 100644
>> --- a/xen/xsm/flask/policy/access_vectors
>> +++ b/xen/xsm/flask/policy/access_vectors
>> @@ -250,6 +250,10 @@ class domain2
>>       psr_alloc
>>   # XEN_DOMCTL_set_gnttab_limits
>>       set_gnttab_limits
>> +# XEN_DOMCTL_set_cpu_topology
>> +    set_cpu_topology
>> +# XENMEM_get_cpu_topology
>> +    get_cpu_topology
>>   }
>> # Similar to class domain, but primarily contains domctls related to HVM domains
>> 
>
