[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v2 02/11] acpi: Define ACPI IO registers for PVH guests



On 09/11/16 14:39, Boris Ostrovsky wrote:
> ACPI hotplug-related IO accesses (to GPE0 block) are handled
> by qemu for HVM guests. Since PVH guests don't have qemu these
> accesses will need to be processed by the hypervisor.
>
> Because the ACPI event model expects the pm1a block to be present
> we need to have the hypervisor emulate it as well.
>
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> ---
> CC: Julien Grall <julien.grall@xxxxxxx>
> CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
> ---
> Changes in v2:
> * Added public macros for ACPI CPU bitmap (at 0xaf00). Note: PRST
>   region shrinks from 32 bytes to 16 (when 128 VCPUs are
>   supported).
>
>
>  tools/libacpi/mk_dsdt.c          |  4 +++-
>  tools/libacpi/static_tables.c    | 28 +++++++++++-----------------
>  xen/include/asm-x86/hvm/domain.h |  6 ++++++
>  xen/include/public/arch-arm.h    | 11 ++++++++---
>  xen/include/public/hvm/ioreq.h   | 13 +++++++++++++
>  5 files changed, 41 insertions(+), 21 deletions(-)
>
> diff --git a/tools/libacpi/mk_dsdt.c b/tools/libacpi/mk_dsdt.c
> index 4ae68bc..2b8234d 100644
> --- a/tools/libacpi/mk_dsdt.c
> +++ b/tools/libacpi/mk_dsdt.c
> @@ -19,6 +19,7 @@
>  #include <stdbool.h>
>  #if defined(__i386__) || defined(__x86_64__)
>  #include <xen/hvm/hvm_info_table.h>
> +#include <xen/hvm/ioreq.h>
>  #elif defined(__aarch64__)
>  #include <xen/arch-arm.h>
>  #endif
> @@ -244,7 +245,8 @@ int main(int argc, char **argv)
>  #endif
>  
>      /* Operation Region 'PRST': bitmask of online CPUs. */
> -    stmt("OperationRegion", "PRST, SystemIO, 0xaf00, 32");
> +    stmt("OperationRegion", "PRST, SystemIO, 0x%x, %d",
> +        ACPI_CPU_MAP, ACPI_CPU_MAP_LEN);
>      push_block("Field", "PRST, ByteAcc, NoLock, Preserve");
>      indent(); printf("PRS, %u\n", max_cpus);
>      pop_block();
> diff --git a/tools/libacpi/static_tables.c b/tools/libacpi/static_tables.c
> index 617bf68..413abcc 100644
> --- a/tools/libacpi/static_tables.c
> +++ b/tools/libacpi/static_tables.c
> @@ -20,6 +20,8 @@
>   * Firmware ACPI Control Structure (FACS).
>   */
>  
> +#define ACPI_REG_BIT_OFFSET    0
> +
>  struct acpi_20_facs Facs = {
>      .signature = ACPI_2_0_FACS_SIGNATURE,
>      .length    = sizeof(struct acpi_20_facs),
> @@ -30,14 +32,6 @@ struct acpi_20_facs Facs = {
>  /*
>   * Fixed ACPI Description Table (FADT).
>   */
> -
> -#define ACPI_PM1A_EVT_BLK_BIT_WIDTH         0x20
> -#define ACPI_PM1A_EVT_BLK_BIT_OFFSET        0x00
> -#define ACPI_PM1A_CNT_BLK_BIT_WIDTH         0x10
> -#define ACPI_PM1A_CNT_BLK_BIT_OFFSET        0x00
> -#define ACPI_PM_TMR_BLK_BIT_WIDTH           0x20
> -#define ACPI_PM_TMR_BLK_BIT_OFFSET          0x00
> -
>  struct acpi_20_fadt Fadt = {
>      .header = {
>          .signature    = ACPI_2_0_FADT_SIGNATURE,
> @@ -56,9 +50,9 @@ struct acpi_20_fadt Fadt = {
>      .pm1a_cnt_blk = ACPI_PM1A_CNT_BLK_ADDRESS_V1,
>      .pm_tmr_blk = ACPI_PM_TMR_BLK_ADDRESS_V1,
>      .gpe0_blk = ACPI_GPE0_BLK_ADDRESS_V1,
> -    .pm1_evt_len = ACPI_PM1A_EVT_BLK_BIT_WIDTH / 8,
> -    .pm1_cnt_len = ACPI_PM1A_CNT_BLK_BIT_WIDTH / 8,
> -    .pm_tmr_len = ACPI_PM_TMR_BLK_BIT_WIDTH / 8,
> +    .pm1_evt_len = ACPI_PM1A_EVT_BLK_LEN,
> +    .pm1_cnt_len = ACPI_PM1A_CNT_BLK_LEN,
> +    .pm_tmr_len = ACPI_PM_TMR_BLK_LEN,
>      .gpe0_blk_len = ACPI_GPE0_BLK_LEN_V1,
>  
>      .p_lvl2_lat = 0x0fff, /* >100,  means we do not support C2 state */
> @@ -79,22 +73,22 @@ struct acpi_20_fadt Fadt = {
>  
>      .x_pm1a_evt_blk = {
>          .address_space_id    = ACPI_SYSTEM_IO,
> -        .register_bit_width  = ACPI_PM1A_EVT_BLK_BIT_WIDTH,
> -        .register_bit_offset = ACPI_PM1A_EVT_BLK_BIT_OFFSET,
> +        .register_bit_width  = ACPI_PM1A_EVT_BLK_LEN * 8,
> +        .register_bit_offset = ACPI_REG_BIT_OFFSET,
>          .address             = ACPI_PM1A_EVT_BLK_ADDRESS_V1,
>      },
>  
>      .x_pm1a_cnt_blk = {
>          .address_space_id    = ACPI_SYSTEM_IO,
> -        .register_bit_width  = ACPI_PM1A_CNT_BLK_BIT_WIDTH,
> -        .register_bit_offset = ACPI_PM1A_CNT_BLK_BIT_OFFSET,
> +        .register_bit_width  = ACPI_PM1A_CNT_BLK_LEN * 8,
> +        .register_bit_offset = ACPI_REG_BIT_OFFSET,
>          .address             = ACPI_PM1A_CNT_BLK_ADDRESS_V1,
>      },
>  
>      .x_pm_tmr_blk = {
>          .address_space_id    = ACPI_SYSTEM_IO,
> -        .register_bit_width  = ACPI_PM_TMR_BLK_BIT_WIDTH,
> -        .register_bit_offset = ACPI_PM_TMR_BLK_BIT_OFFSET,
> +        .register_bit_width  = ACPI_PM_TMR_BLK_LEN * 8,
> +        .register_bit_offset = ACPI_REG_BIT_OFFSET,
>          .address             = ACPI_PM_TMR_BLK_ADDRESS_V1,
>      }
>  };
> diff --git a/xen/include/asm-x86/hvm/domain.h 
> b/xen/include/asm-x86/hvm/domain.h
> index f34d784..f492a2b 100644
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -87,6 +87,12 @@ struct hvm_domain {
>      } ioreq_server;
>      struct hvm_ioreq_server *default_ioreq_server;
>  
> +    /* PVH guests */
> +    struct {
> +        uint8_t pm1a[ACPI_PM1A_EVT_BLK_LEN];
> +        uint8_t gpe[ACPI_GPE0_BLK_LEN_V1];
> +    } acpi_io;
> +
>      /* Cached CF8 for guest PCI config cycles */
>      uint32_t                pci_cf8;
>  
> diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h
> index bd974fb..b793774 100644
> --- a/xen/include/public/arch-arm.h
> +++ b/xen/include/public/arch-arm.h
> @@ -383,6 +383,9 @@ typedef uint64_t xen_callback_t;
>   * should instead use the FDT.
>   */
>  
> +/* Current supported guest VCPUs */
> +#define GUEST_MAX_VCPUS 128
> +
>  /* Physical Address Space */
>  
>  /*
> @@ -410,6 +413,11 @@ typedef uint64_t xen_callback_t;
>  #define GUEST_ACPI_BASE 0x20000000ULL
>  #define GUEST_ACPI_SIZE 0x02000000ULL
>  
> +/* Location of online VCPU bitmap. */
> +#define ACPI_CPU_MAP                 0xaf00
> +#define ACPI_CPU_MAP_LEN             ((GUEST_MAX_VCPUS / 8) + \
> +                                      ((GUEST_MAX_VCPUS & 7) ? 1 : 0))
> +
>  /*
>   * 16MB == 4096 pages reserved for guest to use as a region to map its
>   * grant table in.
> @@ -435,9 +443,6 @@ typedef uint64_t xen_callback_t;
>  #define GUEST_RAM_BANK_BASES   { GUEST_RAM0_BASE, GUEST_RAM1_BASE }
>  #define GUEST_RAM_BANK_SIZES   { GUEST_RAM0_SIZE, GUEST_RAM1_SIZE }
>  
> -/* Current supported guest VCPUs */
> -#define GUEST_MAX_VCPUS 128
> -
>  /* Interrupts */
>  #define GUEST_TIMER_VIRT_PPI    27
>  #define GUEST_TIMER_PHYS_S_PPI  29
> diff --git a/xen/include/public/hvm/ioreq.h b/xen/include/public/hvm/ioreq.h
> index 2e5809b..e3fa704 100644
> --- a/xen/include/public/hvm/ioreq.h
> +++ b/xen/include/public/hvm/ioreq.h
> @@ -24,6 +24,8 @@
>  #ifndef _IOREQ_H_
>  #define _IOREQ_H_
>  
> +#include "hvm_info_table.h" /* HVM_MAX_VCPUS */
> +
>  #define IOREQ_READ      1
>  #define IOREQ_WRITE     0
>  
> @@ -124,6 +126,17 @@ typedef struct buffered_iopage buffered_iopage_t;
>  #define ACPI_GPE0_BLK_ADDRESS        ACPI_GPE0_BLK_ADDRESS_V0
>  #define ACPI_GPE0_BLK_LEN            ACPI_GPE0_BLK_LEN_V0
>  
> +#define ACPI_PM1A_EVT_BLK_LEN        0x04
> +#define ACPI_PM1A_CNT_BLK_LEN        0x02
> +#define ACPI_PM_TMR_BLK_LEN          0x04
> +
> +/* Location of online VCPU bitmap. */
> +#define ACPI_CPU_MAP                 0xaf00
> +#define ACPI_CPU_MAP_LEN             ((HVM_MAX_VCPUS / 8) + \
> +                                      ((HVM_MAX_VCPUS & 7) ? 1 : 0))
> +#if ACPI_CPU_MAP + ACPI_CPU_MAP_LEN >= ACPI_GPE0_BLK_ADDRESS_V1
> +#error "ACPI_CPU_MAP is too big"
> +#endif

Why is this in ioreq.h?  It has nothing to do with ioreq's.

The current ACPI bits in here are to do with the qemu ACPI interface,
not the Xen ACPI interface.

Also, please can we avoid hard-coding the location of the map in the
hypervisor ABI.  These constants make it impossible to ever extend the
number of HVM vcpus at a future date.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.