[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v1 4/6] optee: add OP-TEE mediator



Add OP-TEE mediator, so guests can access OP-TEE services.

The OP-TEE mediator supports address translation for DomUs.
It tracks execution of STD calls, correctly handles memory-related RPC
requests, tracks buffer allocated for RPCs.

With this patch OP-TEE successfully passes its own tests, while the client
is running in DomU.

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>
---

 Changes from "RFC":
 - Removed special case for Dom0/HwDOM
 - No more support for plain OP-TEE (only OP-TEE with virtualization
   config enabled is supported)
 - Multiple domains are now supported
 - Pages that are shared between OP-TEE and domain are now pinned
 - Renamed CONFIG_ARM_OPTEE to CONFIG_OPTEE
 - Command buffers from domain are now shadowed by XEN
 - Mediator now filters out unknown capabilities and requests
 - call contexts, shared memory object now stored per-domain

 xen/arch/arm/tee/Kconfig            |   4 +
 xen/arch/arm/tee/Makefile           |   1 +
 xen/arch/arm/tee/optee.c            | 972 ++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/tee/optee_smc.h |  50 ++
 4 files changed, 1027 insertions(+)
 create mode 100644 xen/arch/arm/tee/optee.c

diff --git a/xen/arch/arm/tee/Kconfig b/xen/arch/arm/tee/Kconfig
index e69de29..5b829db 100644
--- a/xen/arch/arm/tee/Kconfig
+++ b/xen/arch/arm/tee/Kconfig
@@ -0,0 +1,4 @@
+config OPTEE
+       bool "Enable OP-TEE mediator"
+       default n
+       depends on TEE
diff --git a/xen/arch/arm/tee/Makefile b/xen/arch/arm/tee/Makefile
index c54d479..982c879 100644
--- a/xen/arch/arm/tee/Makefile
+++ b/xen/arch/arm/tee/Makefile
@@ -1 +1,2 @@
 obj-y += tee.o
+obj-$(CONFIG_OPTEE) += optee.o
diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c
new file mode 100644
index 0000000..7809406
--- /dev/null
+++ b/xen/arch/arm/tee/optee.c
@@ -0,0 +1,972 @@
+/*
+ * xen/arch/arm/tee/optee.c
+ *
+ * OP-TEE mediator
+ *
+ * Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>
+ * Copyright (c) 2018 EPAM Systems.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <xen/domain_page.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+
+#include <asm/p2m.h>
+#include <asm/tee/tee.h>
+
+#include <asm/tee/optee_msg.h>
+#include <asm/tee/optee_smc.h>
+
+/* Maximum number of non-contiguous shared buffers tracked per call */
+#define MAX_NONCONTIG_ENTRIES   5
+
+/*
+ * Call context. OP-TEE can issue multiple RPC returns during one call.
+ * We need to preserve context during them.
+ */
+struct std_call_ctx {
+    struct list_head list;
+    struct optee_msg_arg *guest_arg;  /* guest's command buffer (mapped) */
+    struct optee_msg_arg *xen_arg;    /* Xen's shadow of the command buffer */
+    void *non_contig[MAX_NONCONTIG_ENTRIES];  /* translated page lists */
+    int non_contig_order[MAX_NONCONTIG_ENTRIES];
+    mfn_t guest_arg_mfn;
+    int optee_thread_id;  /* OP-TEE thread serving this call, or -1 */
+    int rpc_op;           /* last RPC function requested by OP-TEE */
+};
+
+/* Pre-allocated SHM buffer for RPC commands */
+struct shm_rpc {
+    struct list_head list;
+    struct optee_msg_arg *guest_arg;  /* guest page mapped into Xen */
+    struct page *guest_page;
+    mfn_t guest_mfn;
+    uint64_t cookie;  /* cookie that identifies this buffer to OP-TEE */
+};
+
+/* Shared memory buffer for arbitrary data */
+struct shm_buf {
+    struct list_head list;
+    uint64_t cookie;
+    int page_cnt;               /* number of valid entries in pages[] */
+    struct page_info *pages[];  /* pinned guest pages backing the buffer */
+};
+
+/* Per-domain mediator state, linked into domain_ctx_list */
+struct domain_ctx {
+    struct list_head list;
+    struct list_head call_ctx_list;  /* in-flight standard calls */
+    struct list_head shm_rpc_list;   /* RPC command buffers */
+    struct list_head shm_buf_list;   /* registered shared buffers */
+    struct domain *domain;
+    spinlock_t lock;  /* protects the three lists above */
+};
+
+static LIST_HEAD(domain_ctx_list);
+static DEFINE_SPINLOCK(domain_ctx_list_lock);
+
+/*
+ * Probe for OP-TEE: check that a device tree node is present and that
+ * the Trusted OS answers the SMCCC UID query with the OP-TEE API UID.
+ * Returns 0 on success, -ENODEV if OP-TEE is not there.
+ */
+static int optee_probe(void)
+{
+    struct dt_device_node *node;
+    struct smccc_res resp;
+
+    /* Check for entry in dtb  */
+    node = dt_find_compatible_node(NULL, NULL, "linaro,optee-tz");
+    if ( !node )
+        return -ENODEV;
+
+    /* Check UID */
+    call_smccc_smc(ARM_SMCCC_CALL_UID_FID(TRUSTED_OS_END), 0, 0, 0, 0, 0, 0, 0,
+                   &resp);
+
+    if ( resp.a0 != OPTEE_MSG_UID_0 ||
+         resp.a1 != OPTEE_MSG_UID_1 ||
+         resp.a2 != OPTEE_MSG_UID_2 ||
+         resp.a3 != OPTEE_MSG_UID_3 )
+        return -ENODEV;
+
+    printk("OP-TEE mediator initialized\n");
+    return 0;
+}
+
+/*
+ * Translate guest physical address @gaddr of the current domain to an
+ * MFN and take a reference on the backing page (get_page()), so the
+ * page cannot be freed/reassigned while shared with OP-TEE.
+ *
+ * Returns INVALID_MFN if the address is not normal RW RAM or the page
+ * reference cannot be taken. On success optionally stores the page in
+ * @pg; the caller must release the reference via unpin_guest_ram_addr().
+ */
+static mfn_t lookup_and_pin_guest_ram_addr(paddr_t gaddr,
+                                            struct page_info **pg)
+{
+    mfn_t mfn;
+    gfn_t gfn;
+    p2m_type_t t;
+    struct page_info *page;
+    struct domain *d = current->domain;
+
+    gfn = gaddr_to_gfn(gaddr);
+    mfn = p2m_lookup(d, gfn, &t);
+
+    /* Only plain read-write RAM may be shared with OP-TEE */
+    if ( t != p2m_ram_rw || mfn_eq(mfn, INVALID_MFN) )
+        return INVALID_MFN;
+
+    page = mfn_to_page(mfn);
+    if ( !page )
+        return INVALID_MFN;
+
+    if ( !get_page(page, d) )
+        return INVALID_MFN;
+
+    if ( pg )
+        *pg = page;
+
+    return mfn;
+}
+
+/* Drop the page reference taken by lookup_and_pin_guest_ram_addr(). */
+static void unpin_guest_ram_addr(mfn_t mfn)
+{
+    struct page_info *page;
+    page = mfn_to_page(mfn);
+    if ( !page )
+        return;
+
+    put_page(page);
+}
+
+/* Find the mediator context for domain @d, or NULL if none registered. */
+static struct domain_ctx *find_domain_ctx(struct domain* d)
+{
+    struct domain_ctx *ctx;
+
+    spin_lock(&domain_ctx_list_lock);
+
+    list_for_each_entry( ctx, &domain_ctx_list, list )
+    {
+        if ( ctx->domain == d )
+        {
+                spin_unlock(&domain_ctx_list_lock);
+                return ctx;
+        }
+    }
+
+    spin_unlock(&domain_ctx_list_lock);
+    return NULL;
+}
+
+/*
+ * Domain creation hook: announce the new VM to OP-TEE
+ * (OPTEE_SMC_VM_CREATED) and set up the per-domain context.
+ * Returns -ENODEV if OP-TEE refuses to serve this VM.
+ */
+static int optee_domain_create(struct domain *d)
+{
+    struct smccc_res resp;
+    struct domain_ctx *ctx;
+
+    ctx = xzalloc(struct domain_ctx);
+    if ( !ctx )
+        return -ENOMEM;
+
+    /* VM_ID 0 is reserved for the hypervisor, hence domain_id + 1 */
+    call_smccc_smc(OPTEE_SMC_VM_CREATED,
+                   d->domain_id + 1, 0, 0, 0, 0, 0, 0, &resp);
+    if ( resp.a0 != OPTEE_SMC_RETURN_OK ) {
+        gprintk(XENLOG_WARNING, "OP-TEE don't want to support domain: %d\n",
+                (uint32_t)resp.a0);
+        xfree(ctx);
+        return -ENODEV;
+    }
+
+    ctx->domain = d;
+    INIT_LIST_HEAD(&ctx->call_ctx_list);
+    INIT_LIST_HEAD(&ctx->shm_rpc_list);
+    INIT_LIST_HEAD(&ctx->shm_buf_list);
+    spin_lock_init(&ctx->lock);
+
+    spin_lock(&domain_ctx_list_lock);
+    list_add_tail(&ctx->list, &domain_ctx_list);
+    spin_unlock(&domain_ctx_list_lock);
+
+    return 0;
+}
+
+/*
+ * Forward the guest's SMC to OP-TEE verbatim (r0-r6), substituting the
+ * client ID in r7, and copy OP-TEE's answer back into the guest's
+ * registers (r4-r7 are zeroed to avoid leaking state).
+ * Returns true if OP-TEE reported OPTEE_SMC_RETURN_OK.
+ */
+static bool forward_call(struct cpu_user_regs *regs)
+{
+    struct smccc_res resp;
+
+    call_smccc_smc(get_user_reg(regs, 0),
+                   get_user_reg(regs, 1),
+                   get_user_reg(regs, 2),
+                   get_user_reg(regs, 3),
+                   get_user_reg(regs, 4),
+                   get_user_reg(regs, 5),
+                   get_user_reg(regs, 6),
+                   /* client id 0 is reserved for hypervisor itself */
+                   current->domain->domain_id + 1,
+                   &resp);
+
+    set_user_reg(regs, 0, resp.a0);
+    set_user_reg(regs, 1, resp.a1);
+    set_user_reg(regs, 2, resp.a2);
+    set_user_reg(regs, 3, resp.a3);
+    set_user_reg(regs, 4, 0);
+    set_user_reg(regs, 5, 0);
+    set_user_reg(regs, 6, 0);
+    set_user_reg(regs, 7, 0);
+
+    return resp.a0 == OPTEE_SMC_RETURN_OK;
+}
+
+/* Return @ret to the guest in r0 and clear all other result registers. */
+static void set_return(struct cpu_user_regs *regs, uint32_t ret)
+{
+    set_user_reg(regs, 0, ret);
+    set_user_reg(regs, 1, 0);
+    set_user_reg(regs, 2, 0);
+    set_user_reg(regs, 3, 0);
+    set_user_reg(regs, 4, 0);
+    set_user_reg(regs, 5, 0);
+    set_user_reg(regs, 6, 0);
+    set_user_reg(regs, 7, 0);
+}
+
+/*
+ * Allocate a shm_buf tracking structure able to hold @page_cnt pinned
+ * pages and link it into the domain's shm_buf list. @page_cnt comes
+ * from a guest-controlled size, so it is validated here.
+ * Returns NULL on invalid count or allocation failure.
+ */
+static struct shm_buf *allocate_shm_buf(struct domain_ctx *ctx,
+                                        uint64_t cookie,
+                                        int page_cnt)
+{
+    struct shm_buf *shm_buf;
+
+    /*
+     * Reject negative counts and counts that would overflow the
+     * allocation size computed below.
+     */
+    if ( page_cnt < 0 ||
+         (size_t)page_cnt > (SIZE_MAX - sizeof(struct shm_buf)) /
+                            sizeof(struct page_info *) )
+        return NULL;
+
+    /*
+     * pages[] is a flexible array of struct page_info *, so size the
+     * trailing storage with the element type actually stored there.
+     */
+    shm_buf = xzalloc_bytes(sizeof(struct shm_buf) +
+                            page_cnt * sizeof(struct page_info *));
+
+    if ( !shm_buf )
+        return NULL;
+
+    shm_buf->cookie = cookie;
+
+    spin_lock(&ctx->lock);
+    list_add_tail(&shm_buf->list, &ctx->shm_buf_list);
+    spin_unlock(&ctx->lock);
+
+    return shm_buf;
+}
+
+/*
+ * Unlink the shm_buf identified by @cookie from the domain's list,
+ * drop the references on all pages pinned into it and free it.
+ * Silently does nothing if the cookie is unknown.
+ */
+static void free_shm_buf(struct domain_ctx *ctx, uint64_t cookie)
+{
+    struct shm_buf *shm_buf;
+    bool found = false;
+    spin_lock(&ctx->lock);
+
+    list_for_each_entry( shm_buf, &ctx->shm_buf_list, list )
+    {
+        if ( shm_buf->cookie == cookie )
+        {
+            found = true;
+            list_del(&shm_buf->list);
+            break;
+        }
+    }
+    spin_unlock(&ctx->lock);
+
+    if ( !found ) {
+        return;
+    }
+
+    /* Unpin every page that translate_noncontig() pinned into this buf */
+    for ( int i = 0; i < shm_buf->page_cnt; i++ )
+        if ( shm_buf->pages[i] )
+            put_page(shm_buf->pages[i]);
+
+    xfree(shm_buf);
+}
+
+/*
+ * Allocate a context for a standard (non-fast) call and link it into
+ * the domain's call list. The OP-TEE thread id is not known yet, so it
+ * is initialized to -1 until the first RPC return reports it.
+ */
+static struct std_call_ctx *allocate_std_call_ctx(struct domain_ctx *ctx)
+{
+    struct std_call_ctx *call;
+
+    call = xzalloc(struct std_call_ctx);
+    if ( !call )
+        return NULL;
+
+    call->optee_thread_id = -1;
+
+    spin_lock(&ctx->lock);
+    list_add_tail(&call->list, &ctx->call_ctx_list);
+    spin_unlock(&ctx->lock);
+
+    return call;
+}
+
+/*
+ * Unlink @call and release everything it owns: the shadow command
+ * buffer, the mapping/pin of the guest command buffer and any
+ * translated non-contiguous page lists.
+ */
+static void free_std_call_ctx(struct domain_ctx *ctx, struct std_call_ctx 
*call)
+{
+    spin_lock(&ctx->lock);
+    list_del(&call->list);
+    spin_unlock(&ctx->lock);
+
+    if ( call->xen_arg )
+        free_xenheap_page(call->xen_arg);
+
+    /* guest_arg non-NULL implies the guest page is both mapped and pinned */
+    if ( call->guest_arg ) {
+        unmap_domain_page_global(call->guest_arg);
+        unpin_guest_ram_addr(call->guest_arg_mfn);
+    }
+
+    for ( int i = 0; i < MAX_NONCONTIG_ENTRIES; i++ ) {
+        if ( call->non_contig[i] )
+            free_xenheap_pages(call->non_contig[i], call->non_contig_order[i]);
+    }
+
+    xfree(call);
+}
+
+/*
+ * Find the in-flight call served by OP-TEE thread @thread_id,
+ * or NULL if there is none.
+ */
+static struct std_call_ctx *find_call_ctx(struct domain_ctx *ctx, int 
thread_id)
+{
+    struct std_call_ctx *call;
+
+    spin_lock(&ctx->lock);
+    list_for_each_entry( call, &ctx->call_ctx_list, list )
+    {
+        if ( call->optee_thread_id == thread_id )
+        {
+                spin_unlock(&ctx->lock);
+                return call;
+        }
+    }
+    spin_unlock(&ctx->lock);
+
+    return NULL;
+}
+
+/*
+ * Each page of a non-contiguous page list holds this many entries;
+ * the last u64 of every page links to the next page of the list.
+ */
+#define PAGELIST_ENTRIES_PER_PAGE                       \
+    ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
+
+/* Bytes needed to store a page list describing @num_entries pages. */
+static size_t get_pages_list_size(size_t num_entries)
+{
+    int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
+
+    return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+}
+
+/*
+ * Pin and map the guest page at @gaddr as an RPC command buffer,
+ * remember it under @cookie and link it into the domain's shm_rpc
+ * list. Returns NULL on failure with all references released.
+ */
+static struct shm_rpc *allocate_and_map_shm_rpc(struct domain_ctx *ctx,
+                                                paddr_t gaddr,
+                                                uint64_t cookie)
+{
+    struct shm_rpc *shm_rpc;
+
+    shm_rpc = xzalloc(struct shm_rpc);
+    if ( !shm_rpc )
+        return NULL;
+
+    shm_rpc->guest_mfn = lookup_and_pin_guest_ram_addr(gaddr, NULL);
+    if ( mfn_eq(shm_rpc->guest_mfn, INVALID_MFN) )
+        goto err_free;
+
+    shm_rpc->guest_arg = map_domain_page_global(shm_rpc->guest_mfn);
+    if ( !shm_rpc->guest_arg )
+    {
+        gprintk(XENLOG_INFO, "Could not map domain page\n");
+        /*
+         * Drop the page reference taken by
+         * lookup_and_pin_guest_ram_addr() above, otherwise it would be
+         * leaked on this error path.
+         */
+        unpin_guest_ram_addr(shm_rpc->guest_mfn);
+        goto err_free;
+    }
+    shm_rpc->cookie = cookie;
+
+    spin_lock(&ctx->lock);
+    list_add_tail(&shm_rpc->list, &ctx->shm_rpc_list);
+    spin_unlock(&ctx->lock);
+
+    return shm_rpc;
+
+err_free:
+    xfree(shm_rpc);
+    return NULL;
+}
+
+/*
+ * Unlink the RPC buffer identified by @cookie, unpin and unmap the
+ * guest page and free the tracking structure. Silently does nothing
+ * if the cookie is unknown.
+ */
+static void free_shm_rpc(struct domain_ctx *ctx, uint64_t cookie)
+{
+    struct shm_rpc *shm_rpc;
+    bool found = false;
+
+    spin_lock(&ctx->lock);
+
+    list_for_each_entry( shm_rpc, &ctx->shm_rpc_list, list )
+    {
+        if ( shm_rpc->cookie == cookie )
+        {
+            found = true;
+            list_del(&shm_rpc->list);
+            break;
+        }
+    }
+    spin_unlock(&ctx->lock);
+
+    if ( !found ) {
+        return;
+    }
+
+    /* guest_arg non-NULL implies the guest page is both pinned and mapped */
+    if ( shm_rpc->guest_arg ) {
+        unpin_guest_ram_addr(shm_rpc->guest_mfn);
+        unmap_domain_page_global(shm_rpc->guest_arg);
+    }
+
+    xfree(shm_rpc);
+}
+
+/*
+ * Domain destruction hook: tell OP-TEE the VM is gone
+ * (OPTEE_SMC_VM_DESTROYED) and release every call context, RPC buffer
+ * and shared buffer still tracked for this domain.
+ */
+static void optee_domain_destroy(struct domain *d)
+{
+    struct smccc_res resp;
+    struct domain_ctx *ctx;
+    struct std_call_ctx *call, *call_tmp;
+    struct shm_rpc *shm_rpc, *shm_rpc_tmp;
+    struct shm_buf *shm_buf, *shm_buf_tmp;
+    bool found = false;
+
+    /* At this time all domain VCPUs should be stopped */
+
+    /* Inform OP-TEE that domain is shutting down */
+    call_smccc_smc(OPTEE_SMC_VM_DESTROYED,
+                   d->domain_id + 1, 0, 0, 0, 0, 0, 0, &resp);
+
+    /* Remove context from the list */
+    spin_lock(&domain_ctx_list_lock);
+    list_for_each_entry( ctx, &domain_ctx_list, list )
+    {
+        if ( ctx->domain == d )
+        {
+            found = true;
+            list_del(&ctx->list);
+            break;
+        }
+    }
+    spin_unlock(&domain_ctx_list_lock);
+
+    if ( !found )
+        return;
+
+    /* No VCPU is running, so the per-context lock must be free here */
+    ASSERT(!spin_is_locked(&ctx->lock));
+
+    list_for_each_entry_safe( call, call_tmp, &ctx->call_ctx_list, list )
+        free_std_call_ctx(ctx, call);
+
+    list_for_each_entry_safe( shm_rpc, shm_rpc_tmp, &ctx->shm_rpc_list, list )
+        free_shm_rpc(ctx, shm_rpc->cookie);
+
+    list_for_each_entry_safe( shm_buf, shm_buf_tmp, &ctx->shm_buf_list, list )
+        free_shm_buf(ctx, shm_buf->cookie);
+
+    xfree(ctx);
+}
+
+/* Find the RPC buffer registered under @cookie, or NULL. */
+static struct shm_rpc *find_shm_rpc(struct domain_ctx *ctx, uint64_t cookie)
+{
+    struct shm_rpc *shm_rpc;
+
+    spin_lock(&ctx->lock);
+    list_for_each_entry( shm_rpc, &ctx->shm_rpc_list, list )
+    {
+        if ( shm_rpc->cookie == cookie )
+        {
+                spin_unlock(&ctx->lock);
+                return shm_rpc;
+        }
+    }
+    spin_unlock(&ctx->lock);
+
+    return NULL;
+}
+
+/*
+ * Translate a guest-provided non-contiguous buffer descriptor into a
+ * Xen-owned page list that OP-TEE can consume, pinning every referenced
+ * guest page into a new shm_buf. On success param->u.tmem.buf_ptr is
+ * rewritten to point at the Xen copy, which is remembered in
+ * call->non_contig[idx] for later freeing.
+ */
+static bool translate_noncontig(struct domain_ctx *ctx,
+                                struct std_call_ctx *call,
+                                struct optee_msg_param *param,
+                                int idx)
+{
+    /*
+     * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h for
+     * details.
+     */
+    uint64_t size;
+    int page_offset;
+    int num_pages;
+    int order;
+    int entries_on_page = 0;
+    paddr_t gaddr;
+    mfn_t guest_mfn;
+    struct {
+        uint64_t pages_list[PAGELIST_ENTRIES_PER_PAGE];
+        uint64_t next_page_data;
+    } *pages_data_guest, *pages_data_xen, *pages_data_xen_start;
+    struct shm_buf *shm_buf;
+
+    /* The buffer may start at any offset within its first page */
+    page_offset = param->u.tmem.buf_ptr & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1);
+
+    size = ROUNDUP(param->u.tmem.size + page_offset,
+                   OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+    num_pages = DIV_ROUND_UP(size, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+    /*
+     * NOTE(review): num_pages/order derive from a guest-controlled size
+     * with no visible upper bound — confirm a sanity limit is enforced
+     * before this allocation.
+     */
+    order = get_order_from_bytes(get_pages_list_size(num_pages));
+
+    pages_data_xen_start = alloc_xenheap_pages(order, 0);
+    if ( !pages_data_xen_start )
+        return false;
+
+    shm_buf = allocate_shm_buf(ctx, param->u.tmem.shm_ref, num_pages);
+    if ( !shm_buf )
+        goto err_free;
+
+    gaddr = param->u.tmem.buf_ptr & ~(OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1);
+    guest_mfn = lookup_and_pin_guest_ram_addr(gaddr, NULL);
+    if ( mfn_eq(guest_mfn, INVALID_MFN) )
+        goto err_free;
+
+    pages_data_guest = map_domain_page(guest_mfn);
+    if ( !pages_data_guest )
+        goto err_free;
+
+    /* Walk the guest's page list, mirroring each entry into the Xen copy */
+    pages_data_xen = pages_data_xen_start;
+    while ( num_pages ) {
+        struct page_info *page;
+        mfn_t entry_mfn = lookup_and_pin_guest_ram_addr(
+            pages_data_guest->pages_list[entries_on_page], &page);
+
+        if ( mfn_eq(entry_mfn, INVALID_MFN) )
+            goto err_unmap;
+
+        shm_buf->pages[shm_buf->page_cnt++] = page;
+        pages_data_xen->pages_list[entries_on_page] = mfn_to_maddr(entry_mfn);
+        entries_on_page++;
+
+        if ( entries_on_page == PAGELIST_ENTRIES_PER_PAGE ) {
+            /* Chain to the next page of the Xen list and of the guest list */
+            pages_data_xen->next_page_data = virt_to_maddr(pages_data_xen + 1);
+            pages_data_xen++;
+            gaddr = pages_data_guest->next_page_data;
+
+            unmap_domain_page(pages_data_guest);
+            unpin_guest_ram_addr(guest_mfn);
+
+            guest_mfn = lookup_and_pin_guest_ram_addr(gaddr, NULL);
+            if ( mfn_eq(guest_mfn, INVALID_MFN) )
+                goto err_free;
+
+            pages_data_guest = map_domain_page(guest_mfn);
+            if ( !pages_data_guest )
+                goto err_free;
+            /* Roll over to the next page */
+            entries_on_page = 0;
+        }
+        num_pages--;
+    }
+
+    /* Point OP-TEE at the Xen copy of the page list */
+    param->u.tmem.buf_ptr = virt_to_maddr(pages_data_xen_start) | page_offset;
+
+    call->non_contig[idx] = pages_data_xen_start;
+    call->non_contig_order[idx] = order;
+
+    unmap_domain_page(pages_data_guest);
+    unpin_guest_ram_addr(guest_mfn);
+    return true;
+
+err_unmap:
+    unmap_domain_page(pages_data_guest);
+    unpin_guest_ram_addr(guest_mfn);
+    free_shm_buf(ctx, shm_buf->cookie);
+
+err_free:
+    /*
+     * NOTE(review): jumps to err_free taken after shm_buf was allocated
+     * (page-list rollover failures) free only the xenheap pages —
+     * shm_buf and any pages already pinned into it look leaked here;
+     * confirm and consider routing those paths through err_unmap.
+     */
+    free_xenheap_pages(pages_data_xen_start, order);
+
+    return false;
+}
+
+/*
+ * Walk all parameters of the shadowed command buffer and translate
+ * every non-contiguous TMEM reference. Legacy (contiguous) TMEM
+ * parameters are rejected; value and RMEM parameters need no
+ * translation. Returns false if any parameter is invalid.
+ */
+static bool translate_params(struct domain_ctx *ctx,
+                             struct std_call_ctx *call)
+{
+    unsigned int i;
+    uint32_t attr;
+
+    for ( i = 0; i < call->xen_arg->num_params; i++ ) {
+        attr = call->xen_arg->params[i].attr;
+
+        switch ( attr & OPTEE_MSG_ATTR_TYPE_MASK ) {
+        case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
+        case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+        case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+            if ( attr & OPTEE_MSG_ATTR_NONCONTIG ) {
+                if ( !translate_noncontig(ctx, call,
+                                          call->xen_arg->params + i, i) )
+                    return false;
+            }
+            else {
+                gprintk(XENLOG_WARNING, "Guest tries to use old tmem arg\n");
+                return false;
+            }
+            break;
+        case OPTEE_MSG_ATTR_TYPE_NONE:
+        case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+        case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+        case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+        case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+        case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+        case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+            continue;
+        }
+    }
+    return true;
+}
+
+/*
+ * Copy command buffer into xen memory to:
+ * 1) Hide translated addresses from guest
+ * 2) Make sure that guest wouldn't change data in command buffer during call
+ *
+ * On failure the call context is left in a state that
+ * free_std_call_ctx() can clean up exactly once.
+ */
+static bool copy_std_request(struct cpu_user_regs *regs,
+                             struct std_call_ctx *call)
+{
+    paddr_t cmd_gaddr, xen_addr;
+
+    cmd_gaddr = (paddr_t)get_user_reg(regs, 1) << 32 |
+        get_user_reg(regs, 2);
+
+    /*
+     * Command buffer should start at page boundary.
+     * This is OP-TEE ABI requirement.
+     */
+    if ( cmd_gaddr & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1) )
+        return false;
+
+    call->guest_arg_mfn = lookup_and_pin_guest_ram_addr(cmd_gaddr, NULL);
+    if ( mfn_eq(call->guest_arg_mfn, INVALID_MFN) )
+        return false;
+
+    call->guest_arg = map_domain_page_global(call->guest_arg_mfn);
+    if ( !call->guest_arg ) {
+        /* guest_arg stays NULL, so free_std_call_ctx() won't unpin again */
+        unpin_guest_ram_addr(call->guest_arg_mfn);
+        return false;
+    }
+
+    call->xen_arg = alloc_xenheap_page();
+    if ( !call->xen_arg ) {
+        /*
+         * Leave the page mapped and pinned: call->guest_arg is non-NULL
+         * here, so free_std_call_ctx() in the caller will unmap and
+         * unpin it. Unpinning here as well would drop the page
+         * reference twice.
+         */
+        return false;
+    }
+
+    memcpy(call->xen_arg, call->guest_arg, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+    /* Hand OP-TEE the shadow copy instead of the guest's buffer */
+    xen_addr = virt_to_maddr(call->xen_arg);
+
+    set_user_reg(regs, 1, xen_addr >> 32);
+    set_user_reg(regs, 2, xen_addr & 0xFFFFFFFF);
+
+    return true;
+}
+
+/*
+ * Copy call results from the Xen shadow buffer back to the guest's
+ * command buffer: return code, origin, session and all output-capable
+ * parameter fields. Translated addresses are never copied back.
+ */
+static bool copy_std_request_back(struct domain_ctx *ctx,
+                                  struct cpu_user_regs *regs,
+                                  struct std_call_ctx *call)
+{
+    unsigned int i;
+    uint32_t attr;
+
+    call->guest_arg->ret = call->xen_arg->ret;
+    call->guest_arg->ret_origin = call->xen_arg->ret_origin;
+    call->guest_arg->session = call->xen_arg->session;
+    for ( i = 0; i < call->xen_arg->num_params; i++ ) {
+        attr = call->xen_arg->params[i].attr;
+
+        switch ( attr & OPTEE_MSG_ATTR_TYPE_MASK ) {
+        case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+        case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+            call->guest_arg->params[i].u.tmem.size =
+                call->xen_arg->params[i].u.tmem.size;
+            /* fall through */
+        case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+            /*
+             * NOTE(review): this frees via u.tmem.shm_ref although a
+             * VALUE_INPUT param carries u.value, not u.tmem — looks
+             * like only the TMEM_* fall-through path is meaningful
+             * here; confirm the intended union member and case list.
+             */
+            free_shm_buf(ctx, call->xen_arg->params[i].u.tmem.shm_ref);
+            continue;
+        case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+        case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+            call->guest_arg->params[i].u.value.a =
+                call->xen_arg->params[i].u.value.a;
+            call->guest_arg->params[i].u.value.b =
+                call->xen_arg->params[i].u.value.b;
+            continue;
+        case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+        case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+            call->guest_arg->params[i].u.rmem.size =
+                call->xen_arg->params[i].u.rmem.size;
+            continue;
+        case OPTEE_MSG_ATTR_TYPE_NONE:
+        case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+        case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
+            continue;
+        }
+    }
+
+    return true;
+}
+
+/*
+ * Forward the (possibly resumed) standard call to OP-TEE. If OP-TEE
+ * answers with an RPC request, remember the serving thread id and the
+ * requested RPC function and keep the call context alive; otherwise
+ * copy the results back to the guest and destroy the context.
+ */
+static bool execute_std_call(struct domain_ctx *ctx,
+                             struct cpu_user_regs *regs,
+                             struct std_call_ctx *call)
+{
+    register_t optee_ret;
+    forward_call(regs);
+    optee_ret = get_user_reg(regs, 0);
+
+    if ( OPTEE_SMC_RETURN_IS_RPC(optee_ret) )
+    {
+        call->optee_thread_id = get_user_reg(regs, 3);
+        call->rpc_op = OPTEE_SMC_RETURN_GET_RPC_FUNC(optee_ret);
+        return true;
+    }
+
+    copy_std_request_back(ctx, regs, call);
+
+    /* A successful UNREGISTER_SHM also drops our tracking of the buffer */
+    if ( call->xen_arg->cmd == OPTEE_MSG_CMD_UNREGISTER_SHM &&
+         call->xen_arg->ret == 0 ) {
+        free_shm_buf(ctx, call->xen_arg->params[0].u.rmem.shm_ref);
+    }
+
+    free_std_call_ctx(ctx, call);
+
+    return true;
+}
+
+/*
+ * Handle OPTEE_SMC_CALL_WITH_ARG from a guest: shadow the command
+ * buffer, validate the command, translate buffer references and pass
+ * the call on to OP-TEE. Returns false (call not handled) on any
+ * validation failure, after releasing the call context.
+ */
+static bool handle_std_call(struct domain_ctx *ctx, struct cpu_user_regs *regs)
+{
+    struct std_call_ctx *call;
+    bool ret;
+
+    call = allocate_std_call_ctx(ctx);
+
+    if (!call)
+        return false;
+
+    ret = copy_std_request(regs, call);
+    if ( !ret )
+        goto out;
+
+    /* Now we can safely examine contents of command buffer */
+    if ( OPTEE_MSG_GET_ARG_SIZE(call->xen_arg->num_params) >
+         OPTEE_MSG_NONCONTIG_PAGE_SIZE ) {
+        ret = false;
+        goto out;
+    }
+
+    /* Only these commands are accepted from a guest */
+    switch ( call->xen_arg->cmd )
+    {
+    case OPTEE_MSG_CMD_OPEN_SESSION:
+    case OPTEE_MSG_CMD_CLOSE_SESSION:
+    case OPTEE_MSG_CMD_INVOKE_COMMAND:
+    case OPTEE_MSG_CMD_CANCEL:
+    case OPTEE_MSG_CMD_REGISTER_SHM:
+    case OPTEE_MSG_CMD_UNREGISTER_SHM:
+        ret = translate_params(ctx, call);
+        break;
+    default:
+        ret = false;
+    }
+
+    if (!ret)
+        goto out;
+
+    ret = execute_std_call(ctx, regs, call);
+
+out:
+    if (!ret)
+        free_std_call_ctx(ctx, call);
+
+    return ret;
+}
+
+/*
+ * Handle OPTEE_MSG_RPC_CMD_SHM_ALLOC: the guest has provided a
+ * non-contiguous buffer in params[0]; translate it so OP-TEE sees
+ * Xen-owned addresses. NOTE(review): the return value of
+ * translate_noncontig() is ignored here — confirm how a translation
+ * failure is reported back to OP-TEE.
+ */
+static void handle_rpc_cmd_alloc(struct domain_ctx *ctx,
+                                 struct cpu_user_regs *regs,
+                                 struct std_call_ctx *call,
+                                 struct shm_rpc *shm_rpc)
+{
+    if ( shm_rpc->guest_arg->params[0].attr != 
(OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+                                            OPTEE_MSG_ATTR_NONCONTIG) )
+    {
+        gprintk(XENLOG_WARNING, "Invalid attrs for shared mem buffer\n");
+        return;
+    }
+
+    /* Last entry in non_contig array is used to hold RPC-allocated buffer */
+    if ( call->non_contig[MAX_NONCONTIG_ENTRIES - 1] )
+    {
+        free_xenheap_pages(call->non_contig[MAX_NONCONTIG_ENTRIES - 1],
+                           call->non_contig_order[MAX_NONCONTIG_ENTRIES - 1]);
+        call->non_contig[MAX_NONCONTIG_ENTRIES - 1] = NULL;
+    }
+    translate_noncontig(ctx, call, shm_rpc->guest_arg->params + 0,
+                        MAX_NONCONTIG_ENTRIES - 1);
+}
+
+/*
+ * Handle OPTEE_SMC_RPC_FUNC_CMD: locate the RPC command buffer by the
+ * cookie in r1/r2 and dispatch on the command stored in it. Unknown
+ * commands and commands needing no mediation are passed through.
+ */
+static void handle_rpc_cmd(struct domain_ctx *ctx, struct cpu_user_regs *regs,
+                           struct std_call_ctx *call)
+{
+    struct shm_rpc *shm_rpc;
+    uint64_t cookie;
+
+    /*
+     * Cast before shifting: register_t is only 32 bits wide on arm32,
+     * so shifting it by 32 would be undefined behaviour (see the same
+     * idiom in copy_std_request()).
+     */
+    cookie = (uint64_t)get_user_reg(regs, 1) << 32 | get_user_reg(regs, 2);
+
+    shm_rpc = find_shm_rpc(ctx, cookie);
+
+    if ( !shm_rpc )
+    {
+        /* PRIx64, not %lx: cookie is uint64_t on both arm32 and arm64 */
+        gprintk(XENLOG_ERR, "Can't find SHM-RPC with cookie %"PRIx64"\n",
+                cookie);
+        return;
+    }
+
+    switch (shm_rpc->guest_arg->cmd) {
+    case OPTEE_MSG_RPC_CMD_GET_TIME:
+        break;
+    case OPTEE_MSG_RPC_CMD_WAIT_QUEUE:
+        break;
+    case OPTEE_MSG_RPC_CMD_SUSPEND:
+        break;
+    case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
+        handle_rpc_cmd_alloc(ctx, regs, call, shm_rpc);
+        break;
+    case OPTEE_MSG_RPC_CMD_SHM_FREE:
+        free_shm_buf(ctx, shm_rpc->guest_arg->params[0].u.value.b);
+        break;
+    default:
+        break;
+    }
+}
+
+/*
+ * Handle OPTEE_SMC_RPC_FUNC_ALLOC: the guest supplies a page for the
+ * RPC command buffer in r1/r2 and its cookie in r4/r5. Pin and map the
+ * page; on failure report a NULL buffer back to OP-TEE.
+ */
+static void handle_rpc_func_alloc(struct domain_ctx *ctx,
+                                  struct cpu_user_regs *regs,
+                                  struct std_call_ctx *call)
+{
+    /* Cast before shifting: register_t is 32 bits wide on arm32 */
+    paddr_t ptr = (paddr_t)get_user_reg(regs, 1) << 32 |
+                  get_user_reg(regs, 2);
+
+    /* OP-TEE ABI requires the buffer to be page-aligned */
+    if ( ptr & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1) )
+        gprintk(XENLOG_WARNING, "Domain returned invalid RPC command buffer\n");
+
+    if ( ptr ) {
+        uint64_t cookie = (uint64_t)get_user_reg(regs, 4) << 32 |
+                          get_user_reg(regs, 5);
+        struct shm_rpc *shm_rpc;
+
+        shm_rpc = allocate_and_map_shm_rpc(ctx, ptr, cookie);
+        if ( !shm_rpc )
+        {
+            gprintk(XENLOG_WARNING, "Failed to allocate shm_rpc object\n");
+            ptr = 0;
+        }
+        else
+            ptr = mfn_to_maddr(shm_rpc->guest_mfn);
+
+        set_user_reg(regs, 1, ptr >> 32);
+        set_user_reg(regs, 2, ptr & 0xFFFFFFFF);
+    }
+}
+
+/*
+ * Handle OPTEE_SMC_CALL_RETURN_FROM_RPC: find the interrupted call by
+ * the OP-TEE thread id in r3, service the pending RPC request and
+ * resume the call in OP-TEE.
+ */
+static bool handle_rpc(struct domain_ctx *ctx, struct cpu_user_regs *regs)
+{
+    struct std_call_ctx *call;
+
+    int optee_thread_id = get_user_reg(regs, 3);
+
+    call = find_call_ctx(ctx, optee_thread_id);
+
+    if ( !call )
+        return false;
+
+    switch ( call->rpc_op ) {
+    case OPTEE_SMC_RPC_FUNC_ALLOC:
+        handle_rpc_func_alloc(ctx, regs, call);
+        break;
+    case OPTEE_SMC_RPC_FUNC_FREE:
+    {
+        /* Cast before shifting: register_t is 32 bits wide on arm32 */
+        uint64_t cookie = (uint64_t)get_user_reg(regs, 1) << 32 |
+                          get_user_reg(regs, 2);
+        free_shm_rpc(ctx, cookie);
+        break;
+    }
+    case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
+        break;
+    case OPTEE_SMC_RPC_FUNC_CMD:
+        handle_rpc_cmd(ctx, regs, call);
+        break;
+    }
+
+    return execute_std_call(ctx, regs, call);
+}
+
+/*
+ * Mediate OPTEE_SMC_EXCHANGE_CAPABILITIES: pass only known guest caps
+ * to OP-TEE, then filter OP-TEE's answer so guests never see static
+ * SHM (Xen cannot share it safely) and are refused entirely if dynamic
+ * SHM is unavailable. Always returns true (the SMC was handled).
+ */
+static bool handle_exchange_capabilities(struct cpu_user_regs *regs)
+{
+    uint32_t caps;
+
+    /* Filter out unknown guest caps */
+    caps = get_user_reg(regs, 1);
+    caps &= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
+    set_user_reg(regs, 1, caps);
+
+    /* Forward call and return error (if any) back to the guest */
+    if ( !forward_call(regs) )
+        return true;
+
+    caps = get_user_reg(regs, 1);
+
+    /* Filter out unknown OP-TEE caps */
+    caps &= OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM |
+        OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM |
+        OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
+
+    /* Drop static SHM_RPC cap */
+    caps &= ~OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;
+
+    /* Don't allow guests to work without dynamic SHM */
+    if ( !(caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) ) {
+        set_return(regs, OPTEE_SMC_RETURN_ENOTAVAIL);
+        return true;
+    }
+
+    set_user_reg(regs, 1, caps);
+    return true;
+}
+
+/*
+ * Main SMC dispatcher for OP-TEE calls from guests. Returns false for
+ * domains without a mediator context and for unknown function IDs,
+ * which filters out anything OP-TEE was not meant to receive.
+ */
+static bool optee_handle_smc(struct cpu_user_regs *regs)
+{
+    struct domain_ctx *ctx;
+
+    ctx = find_domain_ctx(current->domain);
+    if ( !ctx )
+        return false;
+
+    switch ( get_user_reg(regs, 0) )
+    {
+    /* Simple informational calls are forwarded unmodified */
+    case OPTEE_SMC_CALLS_COUNT:
+    case OPTEE_SMC_CALLS_UID:
+    case OPTEE_SMC_CALLS_REVISION:
+    case OPTEE_SMC_CALL_GET_OS_UUID:
+    case OPTEE_SMC_FUNCID_GET_OS_REVISION:
+    case OPTEE_SMC_ENABLE_SHM_CACHE:
+    case OPTEE_SMC_DISABLE_SHM_CACHE:
+        forward_call(regs);
+        return true;
+    case OPTEE_SMC_GET_SHM_CONFIG:
+        /* No static SHM available for guests */
+        set_return(regs, OPTEE_SMC_RETURN_ENOTAVAIL);
+        return true;
+    case OPTEE_SMC_EXCHANGE_CAPABILITIES:
+        return handle_exchange_capabilities(regs);
+    case OPTEE_SMC_CALL_WITH_ARG:
+        return handle_std_call(ctx, regs);
+    case OPTEE_SMC_CALL_RETURN_FROM_RPC:
+        return handle_rpc(ctx, regs);
+    default:
+        return false;
+    }
+}
+
+/* Mediator removal hook; nothing to tear down at module level yet. */
+static void optee_remove(void)
+{
+}
+
+/* Mediator callbacks registered with the generic TEE framework */
+static const struct tee_mediator_ops optee_ops =
+{
+    .probe = optee_probe,
+    .domain_create = optee_domain_create,
+    .domain_destroy = optee_domain_destroy,
+    .handle_smc = optee_handle_smc,
+    .remove = optee_remove,
+};
+
+REGISTER_TEE_MEDIATOR(optee, "OP-TEE", &optee_ops);
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/tee/optee_smc.h 
b/xen/include/asm-arm/tee/optee_smc.h
index 26d100e..1c5a247 100644
--- a/xen/include/asm-arm/tee/optee_smc.h
+++ b/xen/include/asm-arm/tee/optee_smc.h
@@ -305,6 +305,56 @@ struct optee_smc_disable_shm_cache_result {
        OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)
 
 /*
+ * Inform OP-TEE about a new virtual machine
+ *
+ * Hypervisor issues this call during virtual machine (guest) creation.
+ * OP-TEE records the VM_ID of the new virtual machine and makes itself ready
+ * to receive requests from it.
+ *
+ * Call requests usage:
+ * a0  SMC Function ID, OPTEE_SMC_VM_CREATED
+ * a1  VM_ID of newly created virtual machine
+ * a2-6 Not used
+ * a7  Hypervisor Client ID register. Must be 0, because only hypervisor
+ *      can issue this call
+ *
+ * Normal return register usage:
+ * a0  OPTEE_SMC_RETURN_OK
+ * a1-7        Preserved
+ *
+ * Error return:
+ * a0  OPTEE_SMC_RETURN_ENOTAVAIL      OP-TEE has no resources for
+ *                                     another VM
+ * a1-7        Preserved
+ *
+ */
+#define OPTEE_SMC_FUNCID_VM_CREATED    13
+#define OPTEE_SMC_VM_CREATED \
+       OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_VM_CREATED)
+
+/*
+ * Inform OP-TEE about shutdown of a virtual machine
+ *
+ * Hypervisor issues this call during virtual machine (guest) destruction.
+ * OP-TEE will clean up all resources associated with this VM.
+ *
+ * Call requests usage:
+ * a0  SMC Function ID, OPTEE_SMC_VM_DESTROYED
+ * a1  VM_ID of virtual machine being shut down
+ * a2-6 Not used
+ * a7  Hypervisor Client ID register. Must be 0, because only hypervisor
+ *      can issue this call
+ *
+ * Normal return register usage:
+ * a0  OPTEE_SMC_RETURN_OK
+ * a1-7        Preserved
+ *
+ */
+#define OPTEE_SMC_FUNCID_VM_DESTROYED  14
+#define OPTEE_SMC_VM_DESTROYED \
+       OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_VM_DESTROYED)
+
+/*
  * Resume from RPC (for example after processing a foreign interrupt)
  *
  * Call register usage:
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.