
[Xen-devel] [PATCH v3 08/11] optee: add support for arbitrary shared memory



From: Volodymyr Babchuk <vlad.babchuk@xxxxxxxxx>

Shared memory is widely used by the Normal World (NW) to communicate
with Trusted Applications (TAs) in OP-TEE. The NW can share part of
its own memory with a TA or with the OP-TEE core, either by
registering it with OP-TEE or by providing a temporary reference.
In either case, information about such memory buffers is sent to
OP-TEE as a list of pages. This mechanism is described in
optee_msg.h.
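
Each page of such a list holds an array of page addresses plus a link
to the next page of the list. A simplified sketch of the layout (the
struct name here is illustrative; the constant comes from
optee_msg.h):

    /*
     * Sketch of the OPTEE_MSG_ATTR_NONCONTIG page list layout.
     * Each page of the list carries physical addresses of buffer
     * pages, and its last entry is a physical pointer to the next
     * page of the list.
     */
    struct noncontig_page_list {
        uint64_t pages_list[(OPTEE_MSG_NONCONTIG_PAGE_SIZE /
                             sizeof(uint64_t)) - 1];
        uint64_t next_page_data; /* PA of the next page of the list */
    };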

The mediator should step in when the NW tries to share memory with
OP-TEE, for two reasons:

1. To translate addresses from IPA to PA.
2. To pin domain pages while they are mapped into the OP-TEE or TA
   address space, so the domain can't transfer these pages to another
   domain or balloon them out (see the sketch below).
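
A minimal sketch of the pin-and-translate step for a single page,
using the same helpers as the code below ('gaddr' and pass_to_optee()
are illustrative placeholders, not part of the patch):

    /*
     * Pin one guest page and translate its address. The reference
     * taken by get_page_from_gfn() prevents the page from being
     * freed, ballooned out or transferred until put_page().
     */
    struct page_info *page = get_page_from_gfn(current->domain,
                                               paddr_to_pfn(gaddr),
                                               NULL, P2M_ALLOC);

    if ( page )
    {
        pass_to_optee(page_to_maddr(page)); /* PA, not IPA */
        /* ... later, once OP-TEE no longer uses the page: */
        put_page(page);
    }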

Address translation is done by the translate_noncontig() function.
It allocates a new buffer from the xenheap, walks the guest-provided
list of pages, translates the addresses and stores the PAs in the
newly allocated buffer. This buffer is handed to OP-TEE instead of
the guest's original one and is freed at the end of the standard call
(see the sketch below).
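
The release side is not part of this hunk; given the per-call
non_contig[] and non_contig_order[] arrays added below, a hedged
sketch of it:

    /*
     * Sketch: free the translated page lists when the standard call
     * completes (the actual code lives elsewhere in this patch).
     */
    unsigned int i;

    for ( i = 0; i < MAX_NONCONTIG_ENTRIES; i++ )
        if ( call->non_contig[i] )
            free_xenheap_pages(call->non_contig[i],
                               call->non_contig_order[i]);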

At the same time, this function pins the pages and stores them in a
struct optee_shm_buf object. This object lives for as long as the
given SHM buffer is known to OP-TEE. It is freed when the guest
unregisters the shared buffer, at which point the pages are
unpinned.

We don't need to do any special reference counting because OP-TEE
tracks the buffer on its side, so the mediator unpins the pages only
when OP-TEE returns successfully from the OPTEE_MSG_CMD_UNREGISTER_SHM
call.

Signed-off-by: Volodymyr Babchuk <vlad.babchuk@xxxxxxxxx>
---

 Changes from v2:
 - Made sure that a guest cannot register a shared buffer with the
   same cookie twice
 - Fixed coding style
 - Use access_guest_memory_by_ipa() instead of direct memory mapping

 xen/arch/arm/tee/optee.c | 274 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 274 insertions(+)

diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c
index 771148e940..cfc3b34df7 100644
--- a/xen/arch/arm/tee/optee.c
+++ b/xen/arch/arm/tee/optee.c
@@ -37,6 +37,10 @@
  */
 #define MAX_RPC_SHMS    MAX_STD_CALLS
 
+/* Maximum total number of pages that a guest can share with OP-TEE */
+#define MAX_TOTAL_SHM_BUF_PG    16384
+#define MAX_NONCONTIG_ENTRIES   5
+
 #define OPTEE_KNOWN_NSEC_CAPS OPTEE_SMC_NSEC_CAP_UNIPROCESSOR
 #define OPTEE_KNOWN_SEC_CAPS (OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM | \
                               OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM |  \
@@ -50,6 +54,9 @@ struct optee_std_call {
     struct list_head list;
     struct optee_msg_arg *xen_arg;
     paddr_t guest_arg_ipa;
+    /* Buffer for translated page addresses, shared with OP-TEE */
+    void *non_contig[MAX_NONCONTIG_ENTRIES];
+    int non_contig_order[MAX_NONCONTIG_ENTRIES];
     int optee_thread_id;
     int rpc_op;
     bool in_flight;
@@ -63,12 +70,23 @@ struct shm_rpc {
     uint64_t cookie;
 };
 
+/* Shared memory buffer for arbitrary data */
+struct optee_shm_buf {
+    struct list_head list;
+    uint64_t cookie;
+    unsigned int max_page_cnt;
+    unsigned int page_cnt;
+    struct page_info *pages[];
+};
+
 /* Domain context */
 struct optee_domain {
     struct list_head call_list;
     struct list_head shm_rpc_list;
+    struct list_head optee_shm_buf_list;
     atomic_t call_count;
     atomic_t shm_rpc_count;
+    atomic_t optee_shm_buf_pages;
     spinlock_t lock;
 };
 
@@ -125,9 +143,11 @@ static int optee_enable(struct domain *d)
 
     INIT_LIST_HEAD(&ctx->call_list);
     INIT_LIST_HEAD(&ctx->shm_rpc_list);
+    INIT_LIST_HEAD(&ctx->optee_shm_buf_list);
 
     atomic_set(&ctx->call_count, 0);
     atomic_set(&ctx->shm_rpc_count, 0);
+    atomic_set(&ctx->optee_shm_buf_pages, 0);
 
     spin_lock_init(&ctx->lock);
 
@@ -325,12 +345,91 @@ static void free_shm_rpc(struct optee_domain *ctx, uint64_t cookie)
     xfree(shm_rpc);
 }
 
+static struct optee_shm_buf *allocate_optee_shm_buf(struct optee_domain *ctx,
+                                                    uint64_t cookie,
+                                                    unsigned int pages_cnt)
+{
+    struct optee_shm_buf *optee_shm_buf, *optee_shm_buf_tmp;
+
+    while ( true )
+    {
+        int old = atomic_read(&ctx->optee_shm_buf_pages);
+        int new = old + pages_cnt;
+        if ( new > MAX_TOTAL_SHM_BUF_PG )
+            return NULL;
+        if ( likely(old == atomic_cmpxchg(&ctx->optee_shm_buf_pages,
+                                          old, new)) )
+            break;
+    }
+
+    optee_shm_buf = xzalloc_bytes(sizeof(struct optee_shm_buf) +
+                                  pages_cnt * sizeof(struct page_info *));
+    if ( !optee_shm_buf )
+        goto err;
+
+    optee_shm_buf->cookie = cookie;
+    optee_shm_buf->max_page_cnt = pages_cnt;
+
+    spin_lock(&ctx->lock);
+    /* Check if there is already SHM with the same cookie */
+    list_for_each_entry( optee_shm_buf_tmp, &ctx->optee_shm_buf_list, list )
+    {
+        if ( optee_shm_buf_tmp->cookie == cookie )
+        {
+            spin_unlock(&ctx->lock);
+            gprintk(XENLOG_WARNING,
+                    "Guest tries to use the same SHM buffer cookie\n");
+            goto err;
+        }
+    }
+
+    list_add_tail(&optee_shm_buf->list, &ctx->optee_shm_buf_list);
+    spin_unlock(&ctx->lock);
+
+    return optee_shm_buf;
+
+err:
+    atomic_sub(pages_cnt, &ctx->optee_shm_buf_pages);
+    xfree(optee_shm_buf);
+
+    return NULL;
+}
+
+static void free_optee_shm_buf(struct optee_domain *ctx, uint64_t cookie)
+{
+    struct optee_shm_buf *optee_shm_buf;
+    bool found = false;
+
+    spin_lock(&ctx->lock);
+    list_for_each_entry( optee_shm_buf, &ctx->optee_shm_buf_list, list )
+    {
+        if ( optee_shm_buf->cookie == cookie )
+        {
+            found = true;
+            list_del(&optee_shm_buf->list);
+            break;
+        }
+    }
+    spin_unlock(&ctx->lock);
+
+    if ( !found )
+        return;
+
+    for ( unsigned int i = 0; i < optee_shm_buf->page_cnt; i++ )
+        if ( optee_shm_buf->pages[i] )
+            put_page(optee_shm_buf->pages[i]);
+
+    atomic_sub(optee_shm_buf->max_page_cnt, &ctx->optee_shm_buf_pages);
+
+    xfree(optee_shm_buf);
+}
+
 static void optee_domain_destroy(struct domain *d)
 {
     struct arm_smccc_res resp;
     struct optee_std_call *call, *call_tmp;
     struct optee_domain *ctx = d->arch.tee;
     struct shm_rpc *shm_rpc, *shm_rpc_tmp;
+    struct optee_shm_buf *optee_shm_buf, *optee_shm_buf_tmp;
 
     /* At this time all domain VCPUs should be stopped */
 
@@ -349,12 +448,177 @@ static void optee_domain_destroy(struct domain *d)
     list_for_each_entry_safe( shm_rpc, shm_rpc_tmp, &ctx->shm_rpc_list, list )
         free_shm_rpc(ctx, shm_rpc->cookie);
 
+    list_for_each_entry_safe( optee_shm_buf, optee_shm_buf_tmp,
+                              &ctx->optee_shm_buf_list, list )
+        free_optee_shm_buf(ctx, optee_shm_buf->cookie);
+
     ASSERT(!atomic_read(&ctx->call_count));
     ASSERT(!atomic_read(&ctx->shm_rpc_count));
+    ASSERT(!atomic_read(&ctx->optee_shm_buf_pages));
 
     xfree(d->arch.tee);
 }
 
+#define PAGELIST_ENTRIES_PER_PAGE                       \
+    ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
+
+static size_t get_pages_list_size(size_t num_entries)
+{
+    int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
+
+    return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+}
+
+static bool translate_noncontig(struct optee_domain *ctx,
+                                struct optee_std_call *call,
+                                struct optee_msg_param *param,
+                                int idx)
+{
+    /*
+     * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h for details.
+     */
+    uint64_t size;
+    unsigned int page_offset;
+    unsigned int num_pages;
+    unsigned int order;
+    unsigned int entries_on_page = 0;
+    paddr_t gaddr;
+    struct page_info *guest_page;
+    struct {
+        uint64_t pages_list[PAGELIST_ENTRIES_PER_PAGE];
+        uint64_t next_page_data;
+    } *pages_data_guest, *pages_data_xen, *pages_data_xen_start;
+    struct optee_shm_buf *optee_shm_buf;
+
+    /* Offset of the user buffer within a page */
+    page_offset = param->u.tmem.buf_ptr & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1);
+
+    /* Size of the user buffer in bytes, rounded up to page size */
+    size = ROUNDUP(param->u.tmem.size + page_offset,
+                   OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+    num_pages = DIV_ROUND_UP(size, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+    order = get_order_from_bytes(get_pages_list_size(num_pages));
+
+    pages_data_xen_start = alloc_xenheap_pages(order, 0);
+    if ( !pages_data_xen_start )
+        return false;
+
+    optee_shm_buf = allocate_optee_shm_buf(ctx, param->u.tmem.shm_ref,
+                                           num_pages);
+    if ( !optee_shm_buf )
+        goto err_free;
+
+    gaddr = param->u.tmem.buf_ptr & ~(OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1);
+    guest_page = get_page_from_gfn(current->domain, paddr_to_pfn(gaddr), NULL,
+                                   P2M_ALLOC);
+
+    if ( !guest_page )
+        goto err_shm;
+
+    pages_data_guest = map_domain_page(page_to_mfn(guest_page));
+    if ( !pages_data_guest )
+        goto err_put;
+
+    pages_data_xen = pages_data_xen_start;
+    while ( num_pages )
+    {
+        struct page_info *page;
+        page = get_page_from_gfn(current->domain,
+                  paddr_to_pfn(pages_data_guest->pages_list[entries_on_page]),
+                  NULL, P2M_ALLOC);
+
+        if ( !page )
+            goto err_unmap;
+
+        optee_shm_buf->pages[optee_shm_buf->page_cnt++] = page;
+        pages_data_xen->pages_list[entries_on_page] = page_to_maddr(page);
+        entries_on_page++;
+
+        if ( entries_on_page == PAGELIST_ENTRIES_PER_PAGE )
+        {
+            pages_data_xen->next_page_data = virt_to_maddr(pages_data_xen + 1);
+            pages_data_xen++;
+            gaddr = pages_data_guest->next_page_data;
+
+            unmap_domain_page(pages_data_guest);
+            put_page(guest_page);
+
+            guest_page = get_page_from_gfn(current->domain, paddr_to_pfn(gaddr),
+                                           NULL, P2M_ALLOC);
+            if ( !guest_page )
+                goto err_shm;
+
+            pages_data_guest = map_domain_page(page_to_mfn(guest_page));
+            if ( !pages_data_guest )
+                goto err_put;
+            /* Roll over to the next page */
+            entries_on_page = 0;
+        }
+        num_pages--;
+    }
+
+    param->u.tmem.buf_ptr = virt_to_maddr(pages_data_xen_start) | page_offset;
+
+    call->non_contig[idx] = pages_data_xen_start;
+    call->non_contig_order[idx] = order;
+
+    unmap_domain_page(pages_data_guest);
+    put_page(guest_page);
+    return true;
+
+err_unmap:
+    unmap_domain_page(pages_data_guest);
+err_put:
+    put_page(guest_page);
+err_shm:
+    free_optee_shm_buf(ctx, optee_shm_buf->cookie); /* unpins pinned pages */
+err_free:
+    free_xenheap_pages(pages_data_xen_start, order);
+
+    return false;
+}
+
+static bool translate_params(struct optee_domain *ctx,
+                             struct optee_std_call *call)
+{
+    unsigned int i;
+    uint32_t attr;
+
+    for ( i = 0; i < call->xen_arg->num_params; i++ )
+    {
+        attr = call->xen_arg->params[i].attr;
+
+        switch ( attr & OPTEE_MSG_ATTR_TYPE_MASK )
+        {
+        case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
+        case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+        case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+            if ( attr & OPTEE_MSG_ATTR_NONCONTIG )
+            {
+                if ( !translate_noncontig(ctx, call,
+                                          call->xen_arg->params + i, i) )
+                    return false;
+            }
+            else
+            {
+                gprintk(XENLOG_WARNING, "Guest tries to use old tmem arg\n");
+                return false;
+            }
+            break;
+        case OPTEE_MSG_ATTR_TYPE_NONE:
+        case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+        case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+        case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+        case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+        case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+        case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+            continue;
+        }
+    }
+    return true;
+}
+
 /*
  * Copy command buffer into xen memory to:
  * 1) Hide translated addresses from guest
@@ -469,6 +733,14 @@ static void execute_std_call(struct optee_domain *ctx,
 
     copy_std_request_back(ctx, regs, call);
 
+    /*
+     * If the guest successfully unregistered its shared memory,
+     * we can unpin its pages.
+     */
+    if ( call->xen_arg->cmd == OPTEE_MSG_CMD_UNREGISTER_SHM &&
+         call->xen_arg->ret == 0 )
+        free_optee_shm_buf(ctx, call->xen_arg->params[0].u.rmem.shm_ref);
+
     put_std_call(ctx, call);
     free_std_call(ctx, call);
 }
@@ -497,6 +769,8 @@ static bool handle_std_call(struct optee_domain *ctx,
     case OPTEE_MSG_CMD_CANCEL:
     case OPTEE_MSG_CMD_REGISTER_SHM:
     case OPTEE_MSG_CMD_UNREGISTER_SHM:
+        if ( !translate_params(ctx, call) )
+            goto err;
         break;
     default:
         goto err;
-- 
2.20.0
