
Re: [Xen-devel] [PATCH v2 07/13] optee: add std call handling



Hi,

On 09/03/2018 05:54 PM, Volodymyr Babchuk wrote:
Main way to communicate with OP-TEE is to issue standard SMCCC

NIT: The main way

call. "Standard" is a SMCCC term and it means that call can be
interrupted and OP-TEE can return control to NW before completing
the call.

In contranst with fast calls, where arguments and return values

NIT: s/contranst/contrast/

are passed in registers, standard calls use shared memory. Register
pair r1,r2 holds 64-bit PA of command buffer, where all arguments

Do you mean w1, w2?

are stored and which is used to return data. OP-TEE internally
copies contents of this buffer into own secure memory before accessing
and validating any data in command buffer. This is done to make sure
that NW will not change contents of the validated parameters.

Mediator needs to do the same for number of reasons:

1. To make sure that guest will not change data after validation.
2. To translate IPAs to PAs in the command buffer (this is not done
    in this patch).
3. To hide translated address from guest, so it will not be able
    to do IPA->PA translation by misusing mediator.

Also mediator pins the page with original command buffer because
it will write to it later, when returning response from OP-TEE.

I don't think it is necessary to pin the guest command buffer. You can use the guestcopy helper to copy to/from guest memory.

If the guest modifies the P2M at the same time, then it is not our business if something wrong happens. The only thing we need to prevent here is writing back to an MFN that does not belong to the domain.
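
Something along these lines would do (an untested sketch; write_back_std_reply is a name I made up, and the field-by-field copy is elided):

    /*
     * Untested sketch: write the reply back with the guestcopy helper
     * instead of pinning + mapping the page. The helper re-walks the
     * P2M and takes its own page reference, so we can never write to
     * an MFN that does not belong to the domain.
     */
    static int write_back_std_reply(struct domain *d, paddr_t cmd_gaddr,
                                    struct optee_msg_arg *xen_arg)
    {
        return access_guest_memory_by_ipa(d, cmd_gaddr, xen_arg,
                                          OPTEE_MSG_GET_ARG_SIZE(xen_arg->num_params),
                                          true /* is_write */);
    }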


During standard call OP-TEE can issue multiple "RPC returns", asking
NW to do some work for OP-TEE. NW then issues special call
OPTEE_SMC_CALL_RETURN_FROM_RPC to resume handling of the original call.
Thus, mediator needs to maintain context for original standard call
during multiple SMCCC calls.

Standard call is considered complete, when returned value is
not RPC request.

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>
---
  xen/arch/arm/tee/optee.c | 319 ++++++++++++++++++++++++++++++++++++++++++++++-
  1 file changed, 316 insertions(+), 3 deletions(-)

diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c
index c895a99..1008eba 100644
--- a/xen/arch/arm/tee/optee.c
+++ b/xen/arch/arm/tee/optee.c
@@ -12,6 +12,7 @@
   */
#include <xen/device_tree.h>
+#include <xen/domain_page.h>
  #include <xen/sched.h>
  #include <asm/smccc.h>
  #include <asm/tee/tee.h>
@@ -19,9 +20,27 @@
  #include <asm/tee/optee_msg.h>
  #include <asm/tee/optee_smc.h>
+#define MAX_STD_CALLS 16

I suspect this is used to restrict the number of calls in flight. If so, I would appreciate it if this were documented in the code and the limitation explained in the commit message.
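
If my understanding is correct, something along these lines would do:

    /*
     * A guest can have at most MAX_STD_CALLS standard calls in flight.
     * Each in-flight call pins a guest page and allocates a Xen heap
     * page, so the cap prevents a guest from exhausting Xen resources.
     */
    #define MAX_STD_CALLS 16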

+
+/*
+ * Call context. OP-TEE can issue multiple RPC returns during one call.
+ * We need to preserve context during them.
+ */
+struct std_call_ctx {
+    struct list_head list;
+    struct optee_msg_arg *guest_arg;
+    struct optee_msg_arg *xen_arg;
+    mfn_t guest_arg_mfn;
+    int optee_thread_id;
+    int rpc_op;
+};
+
  struct domain_ctx {
      struct list_head list;
+    struct list_head call_ctx_list;
      struct domain *domain;
+    atomic_t call_ctx_count;
+    spinlock_t lock;
  };
static LIST_HEAD(domain_ctx_list);
@@ -49,6 +68,44 @@ static bool optee_probe(void)
      return true;
  }
+static mfn_t lookup_and_pin_guest_ram_addr(paddr_t gaddr,
+                                            struct page_info **pg)

I don't think there is any need to return both an MFN and a page. You can easily deduce one from the other. In this context, it would be better to return a page.

Also, I would prefer it if this function took a guest frame address rather than a guest physical address.

Lastly, this function is basically a re-implementation of get_page_from_gfn() with a restriction on the p2m type. Please re-implement it using that function.
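
Roughly (untested; get_std_call_page is a name I made up):

    static struct page_info *get_std_call_page(struct domain *d, gfn_t gfn)
    {
        p2m_type_t t;
        struct page_info *page;

        /* get_page_from_gfn() does the P2M lookup and takes the reference. */
        page = get_page_from_gfn(d, gfn_x(gfn), &t, P2M_ALLOC);
        if ( !page )
            return NULL;

        /* Restrict the p2m type as today, dropping the reference on failure. */
        if ( t != p2m_ram_rw )
        {
            put_page(page);
            return NULL;
        }

        return page;
    }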

+{
+    mfn_t mfn;
+    gfn_t gfn;
+    p2m_type_t t;
+    struct page_info *page;
+    struct domain *d = current->domain;
+
+    gfn = gaddr_to_gfn(gaddr);
+    mfn = p2m_lookup(d, gfn, &t);
+
+    if ( t != p2m_ram_rw || mfn_eq(mfn, INVALID_MFN) )
+        return INVALID_MFN;
+
+    page = mfn_to_page(mfn);

mfn_to_page() can never fail. If you want to check whether an MFN is valid, then you can use mfn_valid(...).
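
I.e.:

    if ( !mfn_valid(mfn) )
        return INVALID_MFN;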

+    if ( !page )
+        return INVALID_MFN;
+
+    if ( !get_page(page, d) )
+        return INVALID_MFN;
+
+    if ( pg )
+        *pg = page;
+
+    return mfn;
+}
+
+static void unpin_guest_ram_addr(mfn_t mfn)
+{
+    struct page_info *page;
+    page = mfn_to_page(mfn);
+    if ( !page )
+        return;
+
+    put_page(page);
+}
+
  static struct domain_ctx *find_domain_ctx(struct domain* d)
  {
      struct domain_ctx *ctx;
@@ -87,6 +144,10 @@ static int optee_enable(struct domain *d)
      }
ctx->domain = d;
+    INIT_LIST_HEAD(&ctx->call_ctx_list);
+
+    atomic_set(&ctx->call_ctx_count, 0);
+    spin_lock_init(&ctx->lock);
spin_lock(&domain_ctx_list_lock);
      list_add_tail(&ctx->list, &domain_ctx_list);
@@ -134,11 +195,72 @@ static void set_return(struct cpu_user_regs *regs, uint32_t ret)
      set_user_reg(regs, 7, 0);
  }
+static struct std_call_ctx *allocate_std_call_ctx(struct domain_ctx *ctx)
+{
+    struct std_call_ctx *call;
+    int count;
+
+    count = atomic_add_unless(&ctx->call_ctx_count, 1, MAX_STD_CALLS);

Please add a comment explaining the rationale for this.
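
For instance:

    /*
     * Try to add one to the number of calls in flight.
     * atomic_add_unless() returns the old value, so reading back
     * MAX_STD_CALLS means the guest already hit the limit and the
     * new call has to be refused.
     */
    count = atomic_add_unless(&ctx->call_ctx_count, 1, MAX_STD_CALLS);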

+    if ( count == MAX_STD_CALLS )
+        return NULL;
+
+    call = xzalloc(struct std_call_ctx);
+    if ( !call ) {

Coding style:

if
{
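
I.e. here:

    call = xzalloc(struct std_call_ctx);
    if ( !call )
    {
        atomic_dec(&ctx->call_ctx_count);
        return NULL;
    }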

+        atomic_dec(&ctx->call_ctx_count);
+        return NULL;
+    }
+
+    call->optee_thread_id = -1;
+
+    spin_lock(&ctx->lock);
+    list_add_tail(&call->list, &ctx->call_ctx_list);
+    spin_unlock(&ctx->lock);
+
+    return call;
+}
+
+static void free_std_call_ctx(struct domain_ctx *ctx, struct std_call_ctx *call)
+{
+    atomic_dec(&ctx->call_ctx_count);
+
+    spin_lock(&ctx->lock);
+    list_del(&call->list);
+    spin_unlock(&ctx->lock);
+
+    if ( call->xen_arg )
+        free_xenheap_page(call->xen_arg);

free_xenheap_page() is able to cope with a NULL pointer.

+
+    if ( call->guest_arg ) {
+        unmap_domain_page_global(call->guest_arg);
+        unpin_guest_ram_addr(call->guest_arg_mfn);
+    }
+
+    xfree(call);
+}
+
+static struct std_call_ctx *find_call_ctx(struct domain_ctx *ctx, int thread_id)
+{
+    struct std_call_ctx *call;
+
+    spin_lock(&ctx->lock);
+    list_for_each_entry( call, &ctx->call_ctx_list, list )
+    {
+        if ( call->optee_thread_id == thread_id )
+        {
+                spin_unlock(&ctx->lock);
+                return call;
+        }
+    }
+    spin_unlock(&ctx->lock);
+
+    return NULL;
+}
static void optee_domain_destroy(struct domain *d)
  {
      struct arm_smccc_res resp;
      struct domain_ctx *ctx;
+    struct std_call_ctx *call, *call_tmp;
      bool found = false;
/* At this time all domain VCPUs should be stopped */
@@ -163,9 +285,201 @@ static void optee_domain_destroy(struct domain *d)
      if ( !found )
          return;
+ ASSERT(!spin_is_locked(&ctx->lock));
+
+    list_for_each_entry_safe( call, call_tmp, &ctx->call_ctx_list, list )
+        free_std_call_ctx(ctx, call);
+
+    ASSERT(!atomic_read(&ctx->call_ctx_count));
+
      xfree(ctx);
  }
+/*
+ * Copy command buffer into xen memory to:
+ * 1) Hide translated addresses from guest
+ * 2) Make sure that guest wouldn't change data in command buffer during call
+ */
+static bool copy_std_request(struct cpu_user_regs *regs,
+                             struct std_call_ctx *call)
+{
+    paddr_t cmd_gaddr, xen_addr;
+
+    cmd_gaddr = (paddr_t)get_user_reg(regs, 1) << 32 |
+        get_user_reg(regs, 2);
+
+    /*
+     * Command buffer should start at page boundary.
+     * This is OP-TEE ABI requirement.
+     */
+    if ( cmd_gaddr & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1) )
+        return false;
+
+    call->guest_arg_mfn = lookup_and_pin_guest_ram_addr(cmd_gaddr, NULL);
+    if ( mfn_eq(call->guest_arg_mfn, INVALID_MFN) )
+        return false;
+
+    call->guest_arg = map_domain_page_global(call->guest_arg_mfn);
+    if ( !call->guest_arg ) {
+        unpin_guest_ram_addr(call->guest_arg_mfn);
+        return false;
+    }

It looks to me like you want to use access_guest_memory_by_ipa() here. This would avoid keeping the page mapped globally for the duration of the call.
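
Roughly (untested; this assumes call->xen_arg has already been allocated):

    /*
     * Copy the whole command page into the Xen buffer in one go:
     * no pinning and no global mapping to tear down afterwards.
     */
    if ( access_guest_memory_by_ipa(current->domain, cmd_gaddr,
                                    call->xen_arg,
                                    OPTEE_MSG_NONCONTIG_PAGE_SIZE,
                                    false /* is_write */) )
        return false;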

+
+    call->xen_arg = alloc_xenheap_page();

I don't see anything in the code making sure that OPTEE_MSG_NONCONTIG_PAGE_SIZE fits in a Xen page. You probably want to add a BUILD_BUG_ON(OPTEE_MSG_NONCONTIG_PAGE_SIZE > PAGE_SIZE).

Also, to be more future-proof (i.e. Xen using a larger page size), it would be better to use _xmalloc(OPTEE_MSG_NONCONTIG_PAGE_SIZE, OPTEE_MSG_NONCONTIG_PAGE_SIZE).

_xmalloc() will call alloc_xenheap_page() if necessary.
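
I.e.:

    BUILD_BUG_ON(OPTEE_MSG_NONCONTIG_PAGE_SIZE > PAGE_SIZE);

    call->xen_arg = _xmalloc(OPTEE_MSG_NONCONTIG_PAGE_SIZE,
                             OPTEE_MSG_NONCONTIG_PAGE_SIZE);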


+    if ( !call->xen_arg ) {
+        unpin_guest_ram_addr(call->guest_arg_mfn);
+        return false;
+    }
+
+    memcpy(call->xen_arg, call->guest_arg, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+    xen_addr = virt_to_maddr(call->xen_arg);
+
+    set_user_reg(regs, 1, xen_addr >> 32);
+    set_user_reg(regs, 2, xen_addr & 0xFFFFFFFF);
+
+    return true;
+}
+
+static bool copy_std_request_back(struct domain_ctx *ctx,
+                                  struct cpu_user_regs *regs,
+                                  struct std_call_ctx *call)
+{
+    unsigned int i;
+    uint32_t attr;

This function might be a bit tricky to implement using access_guest_memory_by_ipa(). However, you can just map the region temporarily. This would avoid using map_domain_page_global() in this code and remove one potential source of error in the cleanup path.
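
Something like (untested sketch, copy elided):

    struct optee_msg_arg *guest_arg = map_domain_page(call->guest_arg_mfn);

    guest_arg->ret = call->xen_arg->ret;
    /* ... copy the rest of the OUTPUT/INOUT fields as done today ... */

    unmap_domain_page(guest_arg);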

+
+    call->guest_arg->ret = call->xen_arg->ret;
+    call->guest_arg->ret_origin = call->xen_arg->ret_origin;
+    call->guest_arg->session = call->xen_arg->session;
+    for ( i = 0; i < call->xen_arg->num_params; i++ ) {

for ( ... )
{

+        attr = call->xen_arg->params[i].attr;
+
+        switch ( attr & OPTEE_MSG_ATTR_TYPE_MASK ) {

switch ( ... )
{

+        case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+        case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+            call->guest_arg->params[i].u.tmem.size =
+                call->xen_arg->params[i].u.tmem.size;
+            continue;
+        case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+        case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+            call->guest_arg->params[i].u.value.a =
+                call->xen_arg->params[i].u.value.a;
+            call->guest_arg->params[i].u.value.b =
+                call->xen_arg->params[i].u.value.b;
+            continue;
+        case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+        case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+            call->guest_arg->params[i].u.rmem.size =
+                call->xen_arg->params[i].u.rmem.size;
+            continue;
+        case OPTEE_MSG_ATTR_TYPE_NONE:
+        case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+        case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
+            continue;
+        }
+    }
+
+    return true;
+}
+
+static bool execute_std_call(struct domain_ctx *ctx,
+                             struct cpu_user_regs *regs,
+                             struct std_call_ctx *call)
+{
+    register_t optee_ret;
+
+    forward_call(regs);

I find it a bit odd that you introduce a return value for forward_call() in the previous patch but barely use it.

I think I would prefer it if the function did not return a value and you just relied on get_user_reg(regs, 0).
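
I.e.:

    forward_call(regs);
    /* forward_call() has already stored OP-TEE's result in the registers. */
    optee_ret = get_user_reg(regs, 0);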

+
+    optee_ret = get_user_reg(regs, 0);
+    if ( OPTEE_SMC_RETURN_IS_RPC(optee_ret) )
+    {
+        call->optee_thread_id = get_user_reg(regs, 3);
+        call->rpc_op = OPTEE_SMC_RETURN_GET_RPC_FUNC(optee_ret);
+        return true;
+    }
+
+    copy_std_request_back(ctx, regs, call);

Here another function returns a value, but it is not used. If you don't plan to use the return value, then please drop it. If you do plan to use it, then do so everywhere.

+
+    free_std_call_ctx(ctx, call);
+
+    return true;
+}
+
+static bool handle_std_call(struct domain_ctx *ctx, struct cpu_user_regs *regs)
+{
+    struct std_call_ctx *call;
+    bool ret;
+
+    call = allocate_std_call_ctx(ctx);
+
+    if (!call)
+        return false;
+
+    ret = copy_std_request(regs, call);
+    if ( !ret )
+        goto out;
+
+    /* Now we can safely examine contents of command buffer */
+    if ( OPTEE_MSG_GET_ARG_SIZE(call->xen_arg->num_params) >
+         OPTEE_MSG_NONCONTIG_PAGE_SIZE ) {

if ( ... )
{

+        ret = false;
+        goto out;
+    }
+
+    switch ( call->xen_arg->cmd )
+    {
+    case OPTEE_MSG_CMD_OPEN_SESSION:
+    case OPTEE_MSG_CMD_CLOSE_SESSION:
+    case OPTEE_MSG_CMD_INVOKE_COMMAND:
+    case OPTEE_MSG_CMD_CANCEL:
+    case OPTEE_MSG_CMD_REGISTER_SHM:
+    case OPTEE_MSG_CMD_UNREGISTER_SHM:
+        ret = true;
+        break;
+    default:
+        ret = false;
+    }
+
+    if (!ret)
+        goto out;
+
+    ret = execute_std_call(ctx, regs, call);
+
+out:
+    if (!ret)
+        free_std_call_ctx(ctx, call);
+
+    return ret;
+}
+
+static bool handle_rpc(struct domain_ctx *ctx, struct cpu_user_regs *regs)
+{
+    struct std_call_ctx *call;
+

No need for the newline here.

+    int optee_thread_id = get_user_reg(regs, 3);
+
+    call = find_call_ctx(ctx, optee_thread_id);
+
+    if ( !call )
+        return false;
+
+    switch ( call->rpc_op ) {

switch ( ... )
{

+    case OPTEE_SMC_RPC_FUNC_ALLOC:
+        /* TODO: Add handling */
+        break;
+    case OPTEE_SMC_RPC_FUNC_FREE:
+        /* TODO: Add handling */
+        break;
+    case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
+        break;
+    case OPTEE_SMC_RPC_FUNC_CMD:
+        /* TODO: Add handling */
+        break;
+    }
+
+    return execute_std_call(ctx, regs, call);
+}
+
  static bool handle_exchange_capabilities(struct cpu_user_regs *regs)
  {
      uint32_t caps;
@@ -225,10 +539,9 @@ static bool optee_handle_call(struct cpu_user_regs *regs)
      case OPTEE_SMC_EXCHANGE_CAPABILITIES:
          return handle_exchange_capabilities(regs);
      case OPTEE_SMC_CALL_WITH_ARG:
+        return handle_std_call(ctx, regs);
      case OPTEE_SMC_CALL_RETURN_FROM_RPC:
-        /* TODO: Add proper handling for this calls */
-        forward_call(regs);
-        return true;
+        return handle_rpc(ctx, regs);
      default:
          return false;
      }


Cheers,

--
Julien Grall
