
[Xen-devel] [PATCH v2 3/6] xen/arm: optee: limit number of shared buffers


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
  • Date: Wed, 18 Sep 2019 18:50:59 +0000
  • Cc: "tee-dev@xxxxxxxxxxxxxxxx" <tee-dev@xxxxxxxxxxxxxxxx>, Julien Grall <julien.grall@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
  • Delivery-date: Wed, 18 Sep 2019 18:51:07 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-topic: [PATCH v2 3/6] xen/arm: optee: limit number of shared buffers

We want to limit the number of shared buffers that a guest can register
in OP-TEE. Every such buffer consumes Xen resources, and we don't want
a guest to be able to exhaust Xen's memory. So we impose an arbitrary
limit on the number of shared buffers.

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>
---
 xen/arch/arm/tee/optee.c | 30 +++++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)
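
A note for reviewers: the limit check below relies on atomic_add_unless()
returning the *old* value of the counter, so the increment only happens
while the counter is still below MAX_SHM_BUFFER_COUNT. A minimal
stand-alone sketch of the same pattern in plain C11 atomics (names are
illustrative, not taken from the patch):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define MAX_SHM_BUFFER_COUNT 16

    static atomic_int buf_count;

    /*
     * Add 1 unless the counter already equals the limit; return true
     * iff the increment happened. Same idea as atomic_add_unless().
     */
    static bool reserve_buf_slot(void)
    {
        int old = atomic_load(&buf_count);

        do {
            if (old == MAX_SHM_BUFFER_COUNT)
                return false;   /* limit reached, nothing was added */
        } while (!atomic_compare_exchange_weak(&buf_count, &old, old + 1));

        return true;            /* slot reserved */
    }

    /* Paired decrement for the free path and for error unwinding. */
    static void release_buf_slot(void)
    {
        atomic_fetch_sub(&buf_count, 1);
    }

atomic_compare_exchange_weak() reloads 'old' on failure, so the loop
retries until it either observes the limit or wins the race.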

diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c
index 55d11b91a9..88be959819 100644
--- a/xen/arch/arm/tee/optee.c
+++ b/xen/arch/arm/tee/optee.c
@@ -85,6 +85,14 @@
  */
 #define MAX_SHM_BUFFER_PG       129
 
+/*
+ * Limits the number of shared buffers that a guest can have at once.
+ * This prevents a case where a guest tricks Xen into exhausting its
+ * own memory by allocating a huge number of one-byte buffers. The
+ * value is chosen arbitrarily.
+ */
+#define MAX_SHM_BUFFER_COUNT   16
+
 #define OPTEE_KNOWN_NSEC_CAPS OPTEE_SMC_NSEC_CAP_UNIPROCESSOR
 #define OPTEE_KNOWN_SEC_CAPS (OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM | \
                               OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM | \
@@ -146,6 +154,7 @@ struct optee_domain {
     struct list_head optee_shm_buf_list;
     atomic_t call_count;
     atomic_t optee_shm_buf_pages;
+    atomic_t optee_shm_buf_count;
     spinlock_t lock;
 };
 
@@ -233,6 +242,7 @@ static int optee_domain_init(struct domain *d)
     INIT_LIST_HEAD(&ctx->optee_shm_buf_list);
     atomic_set(&ctx->call_count, 0);
     atomic_set(&ctx->optee_shm_buf_pages, 0);
+    atomic_set(&ctx->optee_shm_buf_count, 0);
     spin_lock_init(&ctx->lock);
 
     d->arch.tee = ctx;
@@ -481,23 +491,26 @@ static struct optee_shm_buf *allocate_optee_shm_buf(struct optee_domain *ctx,
     struct optee_shm_buf *optee_shm_buf, *optee_shm_buf_tmp;
     int old, new;
     int err_code;
+    int count;
+
+    count = atomic_add_unless(&ctx->optee_shm_buf_count, 1,
+                              MAX_SHM_BUFFER_COUNT);
+    if ( count == MAX_SHM_BUFFER_COUNT )
+        return ERR_PTR(-ENOMEM);
 
     do
     {
         old = atomic_read(&ctx->optee_shm_buf_pages);
         new = old + pages_cnt;
         if ( new >= MAX_TOTAL_SMH_BUF_PG )
-            return ERR_PTR(-ENOMEM);
+        {
+            err_code = -ENOMEM;
+            goto err_dec_cnt;
+        }
     }
     while ( unlikely(old != atomic_cmpxchg(&ctx->optee_shm_buf_pages,
                                            old, new)) );
 
-    /*
-     * TODO: Guest can try to register many small buffers, thus, forcing
-     * XEN to allocate context for every buffer. Probably we need to
-     * limit not only total number of pages pinned but also number
-     * of buffer objects.
-     */
     optee_shm_buf = xzalloc_bytes(sizeof(struct optee_shm_buf) +
                                   pages_cnt * sizeof(struct page *));
     if ( !optee_shm_buf )
@@ -533,6 +546,8 @@ static struct optee_shm_buf *allocate_optee_shm_buf(struct optee_domain *ctx,
 err:
     xfree(optee_shm_buf);
     atomic_sub(pages_cnt, &ctx->optee_shm_buf_pages);
+err_dec_cnt:
+    atomic_dec(&ctx->optee_shm_buf_count);
 
     return ERR_PTR(err_code);
 }
@@ -575,6 +590,7 @@ static void free_optee_shm_buf(struct optee_domain *ctx, uint64_t cookie)
     free_pg_list(optee_shm_buf);
 
     atomic_sub(optee_shm_buf->page_cnt, &ctx->optee_shm_buf_pages);
+    atomic_dec(&ctx->optee_shm_buf_count);
 
     xfree(optee_shm_buf);
 }
-- 
2.22.0
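
For completeness, the error handling added by this patch follows the
usual "acquire in order, unwind in reverse through fall-through labels"
idiom: the new err_dec_cnt label sits below the existing err label, so
every failure after the counter increment also undoes it. A compact
single-threaded sketch of that shape (plain ints instead of Xen
atomics; all names are illustrative):

    #include <stdlib.h>

    #define MAX_COUNT 16
    #define MAX_PAGES 16384

    static int buf_count;       /* stands in for optee_shm_buf_count */
    static int buf_pages;       /* stands in for optee_shm_buf_pages */

    static void *alloc_tracked(int pages_cnt)
    {
        void *buf;

        if (buf_count == MAX_COUNT)     /* first: take a buffer slot */
            return NULL;
        buf_count++;

        if (buf_pages + pages_cnt > MAX_PAGES)  /* second: page budget */
            goto err_dec_cnt;
        buf_pages += pages_cnt;

        buf = calloc(pages_cnt, sizeof(void *));
        if (!buf)
            goto err_sub_pages;

        return buf;

    err_sub_pages:
        buf_pages -= pages_cnt; /* undo the page reservation first... */
    err_dec_cnt:
        buf_count--;            /* ...then fall through to the slot */
        return NULL;
    }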
