[Xen-devel] [PATCH v17 06/11] x86/hvm/ioreq: add a new mappable resource type...
... XENMEM_resource_ioreq_server

This patch adds support for a new resource type that can be mapped using
the XENMEM_acquire_resource memory op.

If an emulator makes use of this resource type then, instead of mapping
gfns, the IOREQ server will allocate pages from the heap. These pages
will never be present in the P2M of the guest at any point and so are not
vulnerable to any direct attack by the guest. They are only ever
accessible by Xen and any domain that has mapping privilege over the
guest (which may or may not be limited to the domain running the
emulator).

Because an emulator may continue to hold references to the pages beyond
initial domain tear-down, it is important that they are not freed during
the normal ioreq server tear-down. Instead a per-domain free-list of
pages is maintained and pages in this list are not freed until final
domain destruction.

NOTE: Use of the new resource type is not compatible with use of
      XEN_DMOP_get_ioreq_server_info unless the XEN_DMOP_no_gfns flag is
      set.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>

v17:
 - The use of xenheap pages means that freeing needs to be deferred until
   domain destruction. Add an explanatory paragraph to the commit comment.

v15:
 - Use xenheap pages rather than domheap pages and assign ownership to the
   target domain.

v14:
 - Addressed more comments from Jan.

v13:
 - Introduce an arch_acquire_resource() as suggested by Julien (and have
   the ARM variant simply return -EOPNOTSUPP).
 - Check for ioreq server id truncation as requested by Jan.
 - Jan's R-b not added due to substantive change from v12.

v12:
 - Addressed more comments from Jan.
 - Dropped George's A-b and Wei's R-b because of material change.

v11:
 - Addressed more comments from Jan.

v10:
 - Addressed comments from Jan.

v8:
 - Re-base on new boilerplate.
 - Adjust function signature of hvm_get_ioreq_server_frame(), and test
   whether the bufioreq page is present.

v5:
 - Use the get_ioreq_server() function rather than indexing the array
   directly.
 - Add more explanation into comments to state that mapping guest frames
   and allocation of pages for ioreq servers are not simultaneously
   permitted.
 - Add a comment into asm/ioreq.h stating the meaning of the index value
   passed to hvm_get_ioreq_server_frame().
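For context, here is a minimal sketch of how an emulator might consume the
new resource type from user space. It assumes the
xenforeignmemory_map_resource() / xenforeignmemory_unmap_resource()
wrappers added to libxenforeignmemory elsewhere in this series; the domid
and ioreq server id values below are placeholders, not part of this patch.

/*
 * Illustrative sketch only: relies on the libxenforeignmemory wrappers
 * for XENMEM_acquire_resource provided by the tools side of the series.
 */
#include <stdlib.h>
#include <sys/mman.h>

#include <xenforeignmemory.h>
#include <xen/memory.h>  /* XENMEM_resource_ioreq_server* definitions */

static void *map_ioreq_server_pages(xenforeignmemory_handle *fmem,
                                    domid_t domid, unsigned int ioservid,
                                    unsigned long nr_frames,
                                    xenforeignmemory_resource_handle **res)
{
    void *addr = NULL;

    /* Map nr_frames of the ioreq server resource, starting at frame 0. */
    *res = xenforeignmemory_map_resource(
        fmem, domid, XENMEM_resource_ioreq_server, ioservid,
        XENMEM_resource_ioreq_server_frame_bufioreq, nr_frames,
        &addr, PROT_READ | PROT_WRITE, 0);

    return *res ? addr : NULL;
}

int main(void)
{
    xenforeignmemory_handle *fmem = xenforeignmemory_open(NULL, 0);
    xenforeignmemory_resource_handle *res = NULL;
    void *p = NULL;

    if ( !fmem )
        return EXIT_FAILURE;

    /* Placeholder domid (1) and ioreq server id (0), for illustration. */
    p = map_ioreq_server_pages(fmem, 1, 0, 2, &res);
    if ( p )
    {
        /* ... emulation loop using the shared bufioreq/ioreq pages ... */
        xenforeignmemory_unmap_resource(fmem, res);
    }

    xenforeignmemory_close(fmem);

    return p ? EXIT_SUCCESS : EXIT_FAILURE;
}

Frame indices follow the new XENMEM_resource_ioreq_server_frame_*
definitions below: frame 0 is the buffered ioreq page and frame 1 the
synchronous ioreq page, so mapping two frames starting at frame 0 yields
both; a server created with HVM_IOREQSRV_BUFIOREQ_OFF would instead map a
single frame starting at XENMEM_resource_ioreq_server_frame_ioreq(0).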
---
 xen/arch/x86/hvm/hvm.c           |   2 +
 xen/arch/x86/hvm/ioreq.c         | 154 +++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/mm.c                |  41 +++++++++++
 xen/common/memory.c              |   3 +-
 xen/include/asm-arm/mm.h         |   7 ++
 xen/include/asm-x86/hvm/domain.h |   1 +
 xen/include/asm-x86/hvm/ioreq.h  |   3 +
 xen/include/asm-x86/mm.h         |   5 ++
 xen/include/public/hvm/dm_op.h   |   4 +
 xen/include/public/memory.h      |   9 +++
 10 files changed, 228 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 28bc7e4252..0d7a2f984b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -726,6 +726,8 @@ void hvm_domain_destroy(struct domain *d)
         list_del(&ioport->list);
         xfree(ioport);
     }
+
+    hvm_ioreq_deinit(d);
 }
 
 static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 1e6f0f41e4..1568224f08 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -259,6 +259,19 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
     int rc;
 
+    if ( iorp->page )
+    {
+        /*
+         * If a page has already been allocated (which will happen on
+         * demand if hvm_get_ioreq_server_frame() is called), then
+         * mapping a guest frame is not permitted.
+         */
+        if ( gfn_eq(iorp->gfn, INVALID_GFN) )
+            return -EPERM;
+
+        return 0;
+    }
+
     if ( d->is_dying )
         return -EINVAL;
 
@@ -281,6 +294,56 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     return rc;
 }
 
+static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
+{
+    struct domain *d = s->domain;
+    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+
+    if ( iorp->page )
+    {
+        /*
+         * If a guest frame has already been mapped (which may happen
+         * on demand if hvm_get_ioreq_server_info() is called), then
+         * allocating a page is not permitted.
+         */
+        if ( !gfn_eq(iorp->gfn, INVALID_GFN) )
+            return -EPERM;
+
+        return 0;
+    }
+
+    iorp->page =
+        page_list_remove_head(&d->arch.hvm_domain.ioreq_server.pages);
+    if ( !iorp->page )
+    {
+        iorp->va = alloc_xenheap_page();
+        if ( !iorp->va )
+            return -ENOMEM;
+
+        iorp->page = virt_to_page(iorp->va);
+        share_xen_page_with_guest(iorp->page, d, XENSHARE_writable);
+    }
+    else
+        iorp->va = page_to_virt(iorp->page);
+
+    clear_page(iorp->va);
+    return 0;
+}
+
+static void hvm_free_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
+{
+    struct domain *d = s->domain;
+    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+
+    if ( !iorp->page )
+        return;
+
+    page_list_add_tail(iorp->page, &d->arch.hvm_domain.ioreq_server.pages);
+
+    iorp->page = NULL;
+    iorp->va = NULL;
+}
+
 bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
 {
     const struct hvm_ioreq_server *s;
@@ -484,6 +547,27 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
     hvm_unmap_ioreq_gfn(s, false);
 }
 
+static int hvm_ioreq_server_alloc_pages(struct hvm_ioreq_server *s)
+{
+    int rc;
+
+    rc = hvm_alloc_ioreq_mfn(s, false);
+
+    if ( !rc && (s->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF) )
+        rc = hvm_alloc_ioreq_mfn(s, true);
+
+    if ( rc )
+        hvm_free_ioreq_mfn(s, false);
+
+    return rc;
+}
+
+static void hvm_ioreq_server_free_pages(struct hvm_ioreq_server *s)
+{
+    hvm_free_ioreq_mfn(s, true);
+    hvm_free_ioreq_mfn(s, false);
+}
+
 static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
 {
     unsigned int i;
@@ -631,7 +715,19 @@ static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
 {
     ASSERT(!s->enabled);
     hvm_ioreq_server_remove_all_vcpus(s);
+
+    /*
+     * NOTE: It is safe to call both hvm_ioreq_server_unmap_pages() and
+     *       hvm_ioreq_server_free_pages() in that order.
+     *       This is because the former will do nothing if the pages
+     *       are not mapped, leaving the page to be freed by the latter.
+     *       However if the pages are mapped then the former will set
+     *       the page_info pointer to NULL, meaning the latter will do
+     *       nothing.
+     */
     hvm_ioreq_server_unmap_pages(s);
+    hvm_ioreq_server_free_pages(s);
+
     hvm_ioreq_server_free_rangesets(s);
 }
 
@@ -787,6 +883,52 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     return rc;
 }
 
+int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
+                               unsigned long idx, mfn_t *mfn)
+{
+    struct hvm_ioreq_server *s;
+    int rc;
+
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+    if ( id == DEFAULT_IOSERVID )
+        return -EOPNOTSUPP;
+
+    s = get_ioreq_server(d, id);
+
+    ASSERT(!IS_DEFAULT(s));
+
+    rc = hvm_ioreq_server_alloc_pages(s);
+    if ( rc )
+        goto out;
+
+    switch ( idx )
+    {
+    case XENMEM_resource_ioreq_server_frame_bufioreq:
+        rc = -ENOENT;
+        if ( !HANDLE_BUFIOREQ(s) )
+            goto out;
+
+        *mfn = _mfn(page_to_mfn(s->bufioreq.page));
+        rc = 0;
+        break;
+
+    case XENMEM_resource_ioreq_server_frame_ioreq(0):
+        *mfn = _mfn(page_to_mfn(s->ioreq.page));
+        rc = 0;
+        break;
+
+    default:
+        rc = -EINVAL;
+        break;
+    }
+
+ out:
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+    return rc;
+}
+
 int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
                                      uint32_t type, uint64_t start,
                                      uint64_t end)
@@ -1390,10 +1532,22 @@ static int hvm_access_cf8(
 void hvm_ioreq_init(struct domain *d)
 {
     spin_lock_init(&d->arch.hvm_domain.ioreq_server.lock);
+    INIT_PAGE_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.pages);
 
     register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
 }
 
+void hvm_ioreq_deinit(struct domain *d)
+{
+    while ( !page_list_empty(&d->arch.hvm_domain.ioreq_server.pages) )
+    {
+        struct page_info *page =
+            page_list_remove_head(&d->arch.hvm_domain.ioreq_server.pages);
+
+        free_xenheap_page(page);
+    }
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index a56f875d45..9cca748134 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -122,6 +122,7 @@
 #include <asm/fixmap.h>
 #include <asm/io_apic.h>
 #include <asm/pci.h>
+#include <asm/hvm/ioreq.h>
 #include <asm/hvm/grant_table.h>
 #include <asm/pv/grant_table.h>
 
@@ -4191,6 +4192,46 @@ int xenmem_add_to_physmap_one(
     return rc;
 }
 
+int arch_acquire_resource(struct domain *d, unsigned int type,
+                          unsigned int id, unsigned long frame,
+                          unsigned int nr_frames, xen_pfn_t mfn_list[])
+{
+    int rc;
+
+    switch ( type )
+    {
+    case XENMEM_resource_ioreq_server:
+    {
+        ioservid_t ioservid = id;
+        unsigned int i;
+
+        rc = -EINVAL;
+        if ( id != (unsigned int)ioservid )
+            break;
+
+        rc = 0;
+        for ( i = 0; i < nr_frames; i++ )
+        {
+            mfn_t mfn;
+
+            rc = hvm_get_ioreq_server_frame(d, id, frame + i, &mfn);
+            if ( rc )
+                break;
+
+            mfn_list[i] = mfn_x(mfn);
+        }
+
+        break;
+    }
+
+    default:
+        rc = -EOPNOTSUPP;
+        break;
+    }
+
+    return rc;
+}
+
 long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     int rc;
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 0991c0c0a8..3d810606da 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -990,7 +990,8 @@ static int acquire_resource(
     switch ( xmar.type )
     {
     default:
-        rc = -EOPNOTSUPP;
+        rc = arch_acquire_resource(d, xmar.type, xmar.id, xmar.frame,
+                                   xmar.nr_frames, mfn_list);
         break;
     }
 
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 4d5563b0ce..201534ab0c 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -381,6 +381,13 @@ static inline void put_page_and_type(struct page_info *page)
 
 void clear_and_clean_page(struct page_info *page);
 
+static inline int arch_acquire_resource(
+    struct domain *d, unsigned int type, unsigned int id,
+    unsigned long frame, unsigned int nr_frames, xen_pfn_t mfn_list[])
+{
+    return -EOPNOTSUPP;
+}
+
 #endif /*  __ARCH_ARM_MM__ */
 /*
  * Local variables:
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 8b798ee4e9..91bd77f270 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -113,6 +113,7 @@ struct hvm_domain {
     struct {
         spinlock_t lock;
         struct hvm_ioreq_server *server[MAX_NR_IOREQ_SERVERS];
+        struct page_list_head pages;
     } ioreq_server;
 
     /* Cached CF8 for guest PCI config cycles */
diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
index 1829fcf43e..37d9f5ff70 100644
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -31,6 +31,8 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
                               unsigned long *ioreq_gfn,
                               unsigned long *bufioreq_gfn,
                               evtchn_port_t *bufioreq_port);
+int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
+                               unsigned long idx, mfn_t *mfn);
 int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
                                      uint32_t type, uint64_t start,
                                      uint64_t end);
@@ -55,6 +57,7 @@ int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
 unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered);
 
 void hvm_ioreq_init(struct domain *d);
+void hvm_ioreq_deinit(struct domain *d);
 
 #endif /* __ASM_X86_HVM_IOREQ_H__ */
 
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 4af6b2341a..7147eeff34 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -630,4 +630,9 @@ static inline bool arch_mfn_in_directmap(unsigned long mfn)
     return mfn <= (virt_to_mfn(eva - 1) + 1);
 }
 
+int arch_acquire_resource(struct domain *d, unsigned int type,
+                          unsigned int id, unsigned long frame,
+                          unsigned int nr_frames,
+                          xen_pfn_t mfn_list[]);
+
 #endif /* __ASM_X86_MM_H__ */
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index 13b3737c2f..add68ea192 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -90,6 +90,10 @@ struct xen_dm_op_create_ioreq_server {
  * the frame numbers passed back in gfns <ioreq_gfn> and <bufioreq_gfn>
  * respectively. (If the IOREQ Server is not handling buffered emulation
  * only <ioreq_gfn> will be valid).
+ *
+ * NOTE: To access the synchronous ioreq structures and buffered ioreq
+ *       ring, it is preferable to use the XENMEM_acquire_resource memory
+ *       op specifying resource type XENMEM_resource_ioreq_server.
  */
 #define XEN_DMOP_get_ioreq_server_info 2
 
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 83e60b6603..838f248a59 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -609,9 +609,14 @@ struct xen_mem_acquire_resource {
     domid_t domid;
     /* IN - the type of resource */
     uint16_t type;
+
+#define XENMEM_resource_ioreq_server 0
+
     /*
      * IN - a type-specific resource identifier, which must be zero
      *      unless stated otherwise.
+     *
+     * type == XENMEM_resource_ioreq_server -> id == ioreq server id
      */
     uint32_t id;
     /* IN/OUT - As an IN parameter number of frames of the resource
@@ -625,6 +630,10 @@ struct xen_mem_acquire_resource {
      *      is ignored if nr_frames is 0.
      */
     uint64_aligned_t frame;
+
+#define XENMEM_resource_ioreq_server_frame_bufioreq 0
+#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))
+
     /* IN/OUT - If the tools domain is PV then, upon return, frame_list
      *          will be populated with the MFNs of the resource.
      *          If the tools domain is HVM then it is expected that, on
-- 
2.11.0

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel