
Re: [Xen-devel] [PATCH v5 3/3] xen-hvm: Pass is_default to xen_hvm_init



On Fri, Jun 06, 2014 at 01:52:06PM -0400, Don Slutz wrote:
> This is the xen part of "pc & q35: Add new machine opt max-ram-below-4g"
> 
> Acked-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
> Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
> ---
> v5:
>   Added Acked-by: Stefano Stabellini
>   Minor change of pmc to pcms.
> 
> 
>  hw/i386/pc_piix.c    |  1 +
>  hw/i386/pc_q35.c     |  1 +
>  include/hw/xen/xen.h |  2 +-
>  xen-hvm-stub.c       |  2 +-
>  xen-hvm.c            | 36 ++++++++++++++++++++----------------
>  5 files changed, 24 insertions(+), 18 deletions(-)
> 
> diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
> index 25f4727..27851c6 100644
> --- a/hw/i386/pc_piix.c
> +++ b/hw/i386/pc_piix.c
> @@ -125,6 +125,7 @@ static void pc_init1(MachineState *machine,
>      }
>  
>      if (xen_enabled() && xen_hvm_init(&below_4g_mem_size, &above_4g_mem_size,
> +                                      !(pcms && pcms->max_ram_below_4g),
>                                        &ram_memory) != 0) {
>          fprintf(stderr, "xen hardware virtual machine initialisation failed\n");
>          exit(1);
> diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
> index 155cdf1..6436fce 100644
> --- a/hw/i386/pc_q35.c
> +++ b/hw/i386/pc_q35.c
> @@ -114,6 +114,7 @@ static void pc_q35_init(MachineState *machine)
>      }
>  
>      if (xen_enabled() && xen_hvm_init(&below_4g_mem_size, &above_4g_mem_size,
> +                                      !(pcms && pcms->max_ram_below_4g),
>                                        &ram_memory) != 0) {
>          fprintf(stderr, "xen hardware virtual machine initialisation failed\n");
>          exit(1);
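
For readers following along: both call sites derive is_default the same
way. A minimal standalone sketch of the intended semantics -- assuming,
as in the companion "pc & q35" patch, that max_ram_below_4g == 0 means
the option was not given, and using FakePCMachineState as a stand-in
for the real PCMachineState:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for PCMachineState; only the one field used here.
 * max_ram_below_4g == 0 is assumed to mean "option not set". */
typedef struct {
    uint64_t max_ram_below_4g;
} FakePCMachineState;

static bool compute_is_default(FakePCMachineState *pcms)
{
    /* Same expression as the patch: use the default memory layout
     * unless the user explicitly capped RAM below 4G. */
    return !(pcms && pcms->max_ram_below_4g);
}

int main(void)
{
    FakePCMachineState unset = { 0 };
    FakePCMachineState capped = { .max_ram_below_4g = 2ULL << 30 };

    printf("no option given:      is_default=%d\n",
           compute_is_default(&unset));
    printf("max-ram-below-4g=2G:  is_default=%d\n",
           compute_is_default(&capped));
    return 0;
}

So xen_hvm_init() keeps computing the split itself in the common case
and only trusts the caller-provided sizes when the user asked for a
specific layout.
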
> diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
> index f71f2d8..6b94c14 100644
> --- a/include/hw/xen/xen.h
> +++ b/include/hw/xen/xen.h
> @@ -41,7 +41,7 @@ void xenstore_store_pv_console_info(int i, struct CharDriverState *chr);
>  
>  #if defined(NEED_CPU_H) && !defined(CONFIG_USER_ONLY)
>  int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
> -                 MemoryRegion **ram_memory);
> +                 bool is_default, MemoryRegion **ram_memory);
>  void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
>                     struct MemoryRegion *mr);
>  void xen_modified_memory(ram_addr_t start, ram_addr_t length);
> diff --git a/xen-hvm-stub.c b/xen-hvm-stub.c
> index 2d98696..d1bdb76 100644
> --- a/xen-hvm-stub.c
> +++ b/xen-hvm-stub.c
> @@ -52,7 +52,7 @@ void xen_modified_memory(ram_addr_t start, ram_addr_t length)
>  }
>  
>  int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
> -                 MemoryRegion **ram_memory)
> +                 bool is_default, MemoryRegion **ram_memory)
>  {
>      return 0;
>  }
> diff --git a/xen-hvm.c b/xen-hvm.c
> index a0b6b5d..8d27ee6 100644
> --- a/xen-hvm.c
> +++ b/xen-hvm.c
> @@ -156,31 +156,34 @@ qemu_irq *xen_interrupt_controller_init(void)
>  /* Memory Ops */
>  
>  static void xen_ram_init(ram_addr_t *below_4g_mem_size,
> -                         ram_addr_t *above_4g_mem_size,
> +                         ram_addr_t *above_4g_mem_size, bool is_default,
>                           ram_addr_t ram_size, MemoryRegion **ram_memory_p)
>  {
>      MemoryRegion *sysmem = get_system_memory();
>      ram_addr_t block_len;
>  
> -    block_len = ram_size;
> -    if (ram_size >= HVM_BELOW_4G_RAM_END) {
> -        /* Xen does not allocate the memory continuously, and keep a hole at
> -         * HVM_BELOW_4G_MMIO_START of HVM_BELOW_4G_MMIO_LENGTH
> +    if (is_default) {
> +        if (ram_size >= HVM_BELOW_4G_RAM_END) {
> +            *above_4g_mem_size = ram_size - HVM_BELOW_4G_RAM_END;
> +            *below_4g_mem_size = HVM_BELOW_4G_RAM_END;
> +        } else {
> +            *above_4g_mem_size = 0;
> +            *below_4g_mem_size = ram_size;
> +        }
> +    }
> +    if (!*above_4g_mem_size) {
> +        block_len = ram_size;
> +    } else {
> +        /*
> +         * Xen does not allocate the memory continuously, and keep a hole of
> +         * of the size computed above or passed in.

"of / of" -> duplicated word (typo).

>           */
> -        block_len += HVM_BELOW_4G_MMIO_LENGTH;
> +        block_len = (1ULL << 32) + *above_4g_mem_size;
>      }
>      memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len);
>      *ram_memory_p = &ram_memory;
>      vmstate_register_ram_global(&ram_memory);
>  
> -    if (ram_size >= HVM_BELOW_4G_RAM_END) {
> -        *above_4g_mem_size = ram_size - HVM_BELOW_4G_RAM_END;
> -        *below_4g_mem_size = HVM_BELOW_4G_RAM_END;
> -    } else {
> -        *above_4g_mem_size = 0;
> -        *below_4g_mem_size = ram_size;
> -    }
> -
>      memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
>                               &ram_memory, 0, 0xa0000);
>      memory_region_add_subregion(sysmem, 0, &ram_640k);
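
On the reworked split above: a minimal standalone sketch of the
arithmetic, assuming HVM_BELOW_4G_RAM_END is 0xf0000000 as in the Xen
public headers (this is not the real xen_ram_init()):

#include <stdint.h>
#include <stdio.h>

#define HVM_BELOW_4G_RAM_END 0xf0000000ULL  /* assumed value */

int main(void)
{
    uint64_t ram_size = 6ULL << 30;  /* example: a 6G guest */
    uint64_t below_4g, above_4g, block_len;

    /* Default split, as computed when is_default is true. */
    if (ram_size >= HVM_BELOW_4G_RAM_END) {
        above_4g = ram_size - HVM_BELOW_4G_RAM_END;
        below_4g = HVM_BELOW_4G_RAM_END;
    } else {
        above_4g = 0;
        below_4g = ram_size;
    }

    /* The RAM block must span the MMIO hole whenever any RAM
     * lives above 4G. */
    block_len = above_4g ? (1ULL << 32) + above_4g : ram_size;

    printf("below_4g=%#llx above_4g=%#llx block_len=%#llx\n",
           (unsigned long long)below_4g,
           (unsigned long long)above_4g,
           (unsigned long long)block_len);
    /* 6G guest: below_4g=0xf0000000, above_4g=0x90000000,
     * block_len=0x190000000 */
    return 0;
}

In the default case this block_len equals the old
ram_size + HVM_BELOW_4G_MMIO_LENGTH, since below_4g_mem_size plus the
MMIO hole add up to exactly 4G, so the allocation size is unchanged
there.
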
> @@ -962,7 +965,7 @@ static void xen_wakeup_notifier(Notifier *notifier, void *data)
>  }
>  
>  int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
> -                 MemoryRegion **ram_memory)
> +                 bool is_default, MemoryRegion **ram_memory)
>  {
>      int i, rc;
>      unsigned long ioreq_pfn;
> @@ -1040,7 +1043,8 @@ int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
>  
>      /* Init RAM management */
>      xen_map_cache_init(xen_phys_offset_to_gaddr, state);
> -    xen_ram_init(below_4g_mem_size, above_4g_mem_size, ram_size, ram_memory);
> +    xen_ram_init(below_4g_mem_size, above_4g_mem_size, is_default, ram_size,
> +                 ram_memory);
>  
>      qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
>  
> -- 
> 1.8.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

