Re: [Xen-devel] [v4][PATCH 2/9] xen:x86: define a new hypercall to get RMRR mappings
On 22/08/14 11:09, Tiejun Chen wrote:
> We need this new hypercall to get RMRR mapping for VM.
>
> Signed-off-by: Tiejun Chen <tiejun.chen@xxxxxxxxx>
> ---
> xen/arch/x86/mm.c | 71 +++++++++++++++++++++++++++++++++++++++++++++
> xen/include/public/memory.h | 37 ++++++++++++++++++++++-
> 2 files changed, 107 insertions(+), 1 deletion(-)
>
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index d23cb3f..e0d6650 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -123,6 +123,7 @@
> #include <asm/setup.h>
> #include <asm/fixmap.h>
> #include <asm/pci.h>
> +#include <asm/acpi.h>
>
> /* Mapping of the fixmap space needed early. */
> l1_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
> @@ -4842,6 +4843,76 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>         return rc;
>     }
>
> +    case XENMEM_reserved_device_memory_map:
> +    {
> +        struct xen_reserved_device_memory_map map;
> +        XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
> +        XEN_GUEST_HANDLE_PARAM(xen_reserved_device_memory_t) buffer_param;
> +        unsigned int i = 0;
> +        static unsigned int nr_entries = 0;
> +        static struct xen_reserved_device_memory *rmrr_map;
Absolutely not. This hypercall can easily be run concurrently, so static state like this is not safe.
> +        struct acpi_rmrr_unit *rmrr;
> +
> +        if ( copy_from_guest(&map, arg, 1) )
> +            return -EFAULT;
> +
> +        if ( !nr_entries )
> +            /* Currently we just need to cover RMRR. */
> +            list_for_each_entry( rmrr, &acpi_rmrr_units, list )
> +                nr_entries++;
Maintain a global count as entries are added/removed from this list. It
is a waste of time recounting this list for each hypercall.
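A sketch of what I mean (the helper below is purely illustrative; the increment really belongs wherever the VT-d/DMAR parsing code actually adds entries to acpi_rmrr_units):

    /* Illustrative only: keep a running count next to the list. */
    static unsigned int nr_rmrr_units;

    /* Hypothetical registration helper, for the sake of the example. */
    static void register_rmrr_unit(struct acpi_rmrr_unit *rmrru)
    {
        list_add(&rmrru->list, &acpi_rmrr_units);
        nr_rmrr_units++;
    }

The hypercall handler can then read nr_rmrr_units instead of walking the list on every invocation.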
> +
> +        if ( !nr_entries )
> +            return -ENOENT;
> +        else
> +        {
> +            if ( rmrr_map == NULL )
> +            {
> +                rmrr_map = xmalloc_array(xen_reserved_device_memory_t,
> +                                         nr_entries);
You can do all of this without any memory allocation.
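Something along these lines would do (an untested sketch, keeping your field names and the handle juggling from the patch, and dropping the static state complained about above):

    case XENMEM_reserved_device_memory_map:
    {
        struct xen_reserved_device_memory_map map;
        XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
        XEN_GUEST_HANDLE_PARAM(xen_reserved_device_memory_t) buffer_param;
        xen_reserved_device_memory_t entry;
        struct acpi_rmrr_unit *rmrr;
        unsigned int i = 0, space;

        if ( copy_from_guest(&map, arg, 1) )
            return -EFAULT;

        space = map.nr_entries;
        buffer_param = guest_handle_cast(map.buffer,
                                         xen_reserved_device_memory_t);
        buffer = guest_handle_from_param(buffer_param,
                                         xen_reserved_device_memory_t);
        if ( space && !guest_handle_okay(buffer, space) )
            return -EFAULT;

        /* Copy entries straight from the list into the guest buffer,
         * counting as we go: no intermediate array, no static state. */
        list_for_each_entry( rmrr, &acpi_rmrr_units, list )
        {
            if ( i < space )
            {
                entry.pfn = rmrr->base_address >> PAGE_SHIFT;
                entry.count = PAGE_ALIGN(rmrr->end_address -
                                         rmrr->base_address) / PAGE_SIZE;
                if ( copy_to_guest_offset(buffer, i, &entry, 1) )
                    return -EFAULT;
            }
            ++i;
        }

        if ( !i )
            return -ENOENT;

        /* Always report the total number of entries... */
        map.nr_entries = i;
        if ( copy_to_guest(arg, &map, 1) )
            return -EFAULT;

        /* ...and tell the caller if its buffer was too small. */
        return (i <= space) ? 0 : -ENOBUFS;
    }

With these semantics, a guest can also pass nr_entries = 0 simply to discover how large a buffer it needs.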
> +                if ( rmrr_map == NULL )
> +                {
> +                    return -ENOMEM;
> +                }
> +
> +                list_for_each_entry( rmrr, &acpi_rmrr_units, list )
> +                {
> +                    rmrr_map[i].pfn = rmrr->base_address >> PAGE_SHIFT;
> +                    rmrr_map[i].count = PAGE_ALIGN(rmrr->end_address -
> +                                                   rmrr->base_address) /
> +                                        PAGE_SIZE;
> +                    i++;
> +                }
> +            }
> +        }
> +
> +        if ( map.nr_entries < nr_entries )
> +        {
> +            map.nr_entries = nr_entries;
> +            if ( copy_to_guest(arg, &map, 1) )
> +                return -EFAULT;
> +            return -ENOBUFS;
> +        }
> +
> +        map.nr_entries = nr_entries;
> +        buffer_param = guest_handle_cast(map.buffer,
> +                                         xen_reserved_device_memory_t);
> +        buffer = guest_handle_from_param(buffer_param,
> +                                         xen_reserved_device_memory_t);
> +        if ( !guest_handle_okay(buffer, map.nr_entries) )
> +            return -EFAULT;
> +
> +        for ( i = 0; i < map.nr_entries; ++i )
> +        {
> +            if ( copy_to_guest_offset(buffer, i, rmrr_map + i, 1) )
> +                return -EFAULT;
> +        }
> +
> +        if ( copy_to_guest(arg, &map, 1) )
> +            return -EFAULT;
> +
> +        return 0;
> +    }
> +
>     default:
>         return subarch_memory_op(cmd, arg);
>     }
> diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
> index 2c57aa0..8481843 100644
> --- a/xen/include/public/memory.h
> +++ b/xen/include/public/memory.h
> @@ -523,7 +523,42 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
>
> #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
>
> -/* Next available subop number is 26 */
> +/*
> + * Some devices may reserve some range.
"range" is not a useful unit of measure.
> + *
> + * Currently we just have RMRR
> + * - Reserved memory Region Reporting Structure,
> + * So returns the RMRR memory map as it was when the domain
> + * was started.
> + */
> +#define XENMEM_reserved_device_memory_map 26
> +struct xen_reserved_device_memory {
xen_mem_ to match the prevailing style
> +    /* PFN of the current mapping of the page. */
> +    xen_pfn_t pfn;
> +    /* Number of the current mapping pages. */
> +    xen_ulong_t count;
> +};
This struct marks a range, but the fields don't make it clear. I would
suggest "start" and "nr_frames" as names.
~Andrew
> +typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);
> +
> +struct xen_reserved_device_memory_map {
> +    /*
> +     * On call the number of entries which can be stored in buffer. On
> +     * return the number of entries which have been stored in
> +     * buffer.
> +     */
> +    unsigned int nr_entries;
> +
> +    /*
> +     * Entries in the buffer are in the same format as
> +     * xen_reserved_device_memory.
> +     */
> +    XEN_GUEST_HANDLE(void) buffer;
> +};
> +typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);
> +
> +/* Next available subop number is 27 */
>
> #endif /* __XEN_PUBLIC_MEMORY_H__ */
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel