
Re: [Xen-devel] [PATCH RFC 18/20] libxc/acpi: Build ACPI tables for HVMlite guests



On Tue, Apr 05, 2016 at 09:25:47PM -0400, Boris Ostrovsky wrote:
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> ---
>  tools/libxc/Makefile              |  22 +++-
>  tools/libxc/include/xc_dom.h      |   1 +
>  tools/libxc/xc_acpi.c             | 268 ++++++++++++++++++++++++++++++++++++++
>  tools/libxc/xc_dom_x86.c          |   7 +
>  tools/libxl/libxl_x86.c           |  19 +--
>  xen/common/libacpi/Makefile       |   5 +-
>  xen/common/libacpi/acpi2_0.h      |   2 +-
>  xen/common/libacpi/build.c        |   2 +-
>  xen/common/libacpi/dsdt_empty.asl |  22 ++++
>  9 files changed, 335 insertions(+), 13 deletions(-)
>  create mode 100644 tools/libxc/xc_acpi.c
>  create mode 100644 xen/common/libacpi/dsdt_empty.asl
> 

I've only done a very rough review.

> diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
> index 608404f..9569e54 100644
> --- a/tools/libxc/Makefile
> +++ b/tools/libxc/Makefile
> @@ -79,6 +79,26 @@ GUEST_SRCS-y += $(ELF_SRCS-y)
>  $(patsubst %.c,%.o,$(ELF_SRCS-y)): CFLAGS += -Wno-pointer-sign
>  $(patsubst %.c,%.opic,$(ELF_SRCS-y)): CFLAGS += -Wno-pointer-sign
>  
> +ACPI_PATH = $(XEN_ROOT)/xen/common/libacpi
> +vpath %.c $(ACPI_PATH)
> +ACPI_FILES  = dsdt_anycpu.c dsdt_15cpu.c static_tables.c
> +ACPI_FILES += dsdt_anycpu_qemu_xen.c dsdt_empty.c build.c
> +ACPI_SRCS = $(patsubst %.c,$(ACPI_PATH)/%.c,$(ACPI_FILES))
> +
> +.NOTPARALLEL: $(ACPI_SRCS)

Why is this needed?

> +$(ACPI_SRCS):
> +     make -C $(ACPI_PATH)
> +
> +$(patsubst %.c,%.o,$(ACPI_SRCS)): CFLAGS += -I$(ACPI_PATH)
> +$(patsubst %.c,%.opic,$(ACPI_SRCS)): CFLAGS += -I$(ACPI_PATH)
> +xc_acpi.o: CFLAGS += -I$(ACPI_PATH)
> +xc_acpi.opic: CFLAGS += -I$(ACPI_PATH)
> +
> +GUEST_SRCS-y += $(ACPI_FILES)
> +
> +$(patsubst %.c,%.o,$(ELF_SRCS-y)): CFLAGS += -Wno-pointer-sign
> +$(patsubst %.c,%.opic,$(ELF_SRCS-y)): CFLAGS += -Wno-pointer-sign
> +
>  # new domain builder
>  GUEST_SRCS-y                 += xc_dom_core.c xc_dom_boot.c
>  GUEST_SRCS-y                 += xc_dom_elfloader.c
> @@ -89,7 +109,7 @@ GUEST_SRCS-$(CONFIG_ARM)     += xc_dom_armzimageloader.c
>  GUEST_SRCS-y                 += xc_dom_binloader.c
>  GUEST_SRCS-y                 += xc_dom_compat_linux.c
>  
> -GUEST_SRCS-$(CONFIG_X86)     += xc_dom_x86.c
> +GUEST_SRCS-$(CONFIG_X86)     += xc_dom_x86.c xc_acpi.c
>  GUEST_SRCS-$(CONFIG_X86)     += xc_cpuid_x86.c
>  GUEST_SRCS-$(CONFIG_ARM)     += xc_dom_arm.c
>  
> diff --git a/tools/libxc/include/xc_dom.h b/tools/libxc/include/xc_dom.h
> index 6ebe946..3842fb1 100644
> --- a/tools/libxc/include/xc_dom.h
> +++ b/tools/libxc/include/xc_dom.h
> @@ -407,6 +407,7 @@ void *xc_dom_pfn_to_ptr_retcount(struct xc_dom_image *dom, xen_pfn_t first,
>                                   xen_pfn_t count, xen_pfn_t *count_out);
>  void xc_dom_unmap_one(struct xc_dom_image *dom, xen_pfn_t pfn);
>  void xc_dom_unmap_all(struct xc_dom_image *dom);
> +int xc_dom_build_acpi(struct xc_dom_image *dom);
>  
>  static inline void *xc_dom_seg_to_ptr_pages(struct xc_dom_image *dom,
>                                        struct xc_dom_seg *seg,
> diff --git a/tools/libxc/xc_acpi.c b/tools/libxc/xc_acpi.c
> new file mode 100644
> index 0000000..3173d49
> --- /dev/null
> +++ b/tools/libxc/xc_acpi.c
> @@ -0,0 +1,268 @@
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <string.h>
> +#include <inttypes.h>
> +#include <assert.h>
> +
> +#include <xen/xen.h>
> +#include <xen/foreign/x86_32.h>
> +#include <xen/foreign/x86_64.h>
> +#include <xen/hvm/hvm_info_table.h>
> +#include <xen/io/protocols.h>
> +
> +#include "xg_private.h"
> +#include "xc_dom.h"
> +#include "xenctrl.h"
> +
> +#include "acpi2_0.h"
> +
> +#define RESERVED_MEMORY_DYNAMIC_START 0xFC001000
> +#define ACPI_PHYSICAL_ADDRESS         0x000EA020
> +
> +/* Initial allocation for ACPI tables */
> +#define NUM_ACPI_PAGES  16
> +
> +#define PFN(paddr)  ((paddr) >> PAGE_SHIFT)
> +
> +extern unsigned char dsdt_anycpu[], dsdt_15cpu[], dsdt_empty[];
> +extern int dsdt_anycpu_len, dsdt_15cpu_len, dsdt_empty_len;
> +
> +static uint64_t alloc_up, alloc_down;
> +static unsigned long base_addr;
> +
> +/* Assumes contiguous physical space */
> +static unsigned long virt_to_phys(void *v)
> +{
> +     return (((unsigned long)v - base_addr) + RESERVED_MEMORY_DYNAMIC_START);
> +}
> +
> +static void *mem_alloc(uint32_t size, uint32_t align)
> +{
> +    uint64_t s, e;
> +
> +    /* Align to at least 16 bytes. */
> +    if ( align < 16 )
> +        align = 16;
> +
> +    s = (alloc_up + align) & ~((uint64_t)align - 1);
> +    e = s + size - 1;
> +
> +    /* TODO: Reallocate memory */
> +    if ((e < s) || (e >= alloc_down)) return NULL;
> +
> +    while ( PFN(alloc_up) != PFN(e) )
> +    {
> +        alloc_up += PAGE_SIZE;
> +    }
> +
> +    alloc_up = e;
> +
> +    return (void *)(unsigned long)s;
> +}
> +
> +static int init_acpi_config(struct xc_dom_image *dom,
> +                            struct acpi_config *config)
> +{
> +    xc_interface *xch = dom->xch;
> +    uint32_t domid = dom->guest_domid;
> +    xc_dominfo_t info;
> +    int i, rc;
> +
> +    memset(config, 0, sizeof(*config));
> +
> +    config->dsdt_anycpu = config->dsdt_15cpu = dsdt_empty;
> +    config->dsdt_anycpu_len = config->dsdt_15cpu_len = dsdt_empty_len;
> +
> +    rc = xc_domain_getinfo(xch, domid, 1, &info);
> +    if ( rc < 0 )
> +    {
> +        DOMPRINTF("%s: getdomaininfo failed (rc=%d)", __FUNCTION__, rc);
> +        return rc;
> +    }
> +
> +    config->apic_mode = 1;
> +
> +    if ( dom->nr_vnodes )
> +    {
> +        struct acpi_numa *numa = &config->numa;
> +
> +        numa->vmemrange = calloc(dom->nr_vmemranges,
> +                                 sizeof(*numa->vmemrange));
> +        numa->vdistance = calloc(dom->nr_vnodes,
> +                                 sizeof(*numa->vdistance));
> +        numa->vcpu_to_vnode = calloc(config->nr_vcpus,
> +                                     sizeof(*numa->vcpu_to_vnode));
> +        if ( !numa->vmemrange || !numa->vdistance || !numa->vcpu_to_vnode )
> +        {
> +            DOMPRINTF("%s: Out of memory", __FUNCTION__);
> +            free(numa->vmemrange);
> +            free(numa->vdistance);
> +            free(numa->vcpu_to_vnode);
> +            return -ENOMEM;
> +        }
> +
> +        rc = xc_domain_getvnuma(xch, domid, &numa->nr_vnodes,
> +                                &numa->nr_vmemranges,
> +                                &config->nr_vcpus, numa->vmemrange,
> +                                numa->vdistance, numa->vcpu_to_vnode);
> +
> +         if ( rc )
> +        {
> +            DOMPRINTF("%s: xc_domain_getvnuma failed (rc=%d)", __FUNCTION__, 
> rc);
> +            return rc;
> +        }
> +    }
> +    else
> +        config->nr_vcpus = info.max_vcpu_id + 1;

This looks wrong; at least, it is not immediately clear why you would
want to do this.
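
Just as a sketch of what I would have expected here (nr_online_vcpus is
a field of xc_dominfo_t; whether it is the right value at this point of
the domain build is an open question):

    config->nr_vcpus = info.nr_online_vcpus;

If max_vcpu_id + 1 is deliberate (e.g. to also cover offline vCPUs), a
comment explaining that would help.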

> +
> +    config->vcpu_online = calloc((HVM_MAX_VCPUS + 7) / 8,
> +                                 sizeof(*config->vcpu_online));
> +    if ( config->vcpu_online == NULL )
> +    {
> +        DOMPRINTF("%s: Can't allocate vcpu_online", __FUNCTION__);
> +        return -ENOMEM;
> +    }
> +
> +    for (i=0; i<config->nr_vcpus; i++)

Coding style.
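
I.e. presumably, following the style used elsewhere in this file:

    for ( i = 0; i < config->nr_vcpus; i++ )
        config->vcpu_online[i / 8] |= 1 << (i & 7);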

> +        config->vcpu_online[i / 8] |= 1 << (i & 7);
> +
> +    config->mem_ops.alloc = mem_alloc;
> +    config->mem_ops.v2p = virt_to_phys;
> +
> +    return 0;
> +}
> +
> +int xc_dom_build_acpi(struct xc_dom_image *dom)
> +{
> +    struct acpi_config config;
> +    uint32_t domid = dom->guest_domid;
> +    xc_interface *xch = dom->xch;
> +    int rc, i, acpi_pages_num;
> +    xen_pfn_t extent, *extents;
> +    void *acpi_pages, *acpi_physical;
> +    void *guest_info_page, *guest_acpi_pages;
> +

These need to be initialised to NULL and 0 respectively, in case an
error path is taken before they are set.
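
E.g. (just a sketch; assuming the rest of the function, not quoted
here, has a common error/exit path that frees or unmaps these):

    int rc, i, acpi_pages_num = 0;
    xen_pfn_t extent, *extents = NULL;
    void *acpi_pages = NULL, *acpi_physical = NULL;
    void *guest_info_page = NULL, *guest_acpi_pages = NULL;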

Wei.
