
Re: [Xen-devel] [PATCH v3 07/11] tmem: Make the uint64_t oid[3] a proper structure: tmem_oid



On Tue, Sep 01, 2015 at 09:54:12AM -0600, Jan Beulich wrote:
> >>> On 01.09.15 at 17:23, <konrad.wilk@xxxxxxxxxx> wrote:
> > There is only need for the compat layer once I move the 'xen_tmem_oid_t' to
> > the tmem.h header file - and there is where the introduction in xlat.lst is 
> > done:
> > 
> > --- a/xen/include/xlat.lst
> > +++ b/xen/include/xlat.lst
> > @@ -103,6 +103,7 @@
> >  !      sched_poll                      sched.h
> >  ?      sched_remote_shutdown           sched.h
> >  ?      sched_shutdown                  sched.h
> > +?      xen_tmem_oid                    tmem.h
> 
> Looks like your problem is the xen_ prefix here - no other entity has
> it, despite there being a number of examples where the actual type
> uses such a prefix.

With your awesome hints (thank you!), this all now works. This is what
the patch ended up looking like:


From 9a829efde9a47ca59a5c2029f3706a071342ea23 Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Date: Mon, 31 Aug 2015 10:57:27 -0400
Subject: [PATCH v3.1 07/10] tmem: Make the uint64_t oid[3] a proper structure:
 xen_tmem_oid

And use it almost everywhere. That is easy to do for the
sysctl interface since the hypervisor and toolstack are intertwined.

But for the tmem hypercall we need to be diligent (as it
is guest-facing), so that conversion is deferred to a later patch,
"tmem: Use 'struct xen_tmem_oid' for every user", to help
with bisection.

We also re-indent some function parameters so that they
line up at the proper location.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 tools/libxc/include/xenctrl.h |  6 +----
 tools/libxc/xc_tmem.c         | 16 ++++++------
 xen/common/tmem.c             | 57 +++++++++++++++++++++++--------------------
 xen/include/public/sysctl.h   |  8 +++++-
 4 files changed, 46 insertions(+), 41 deletions(-)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 9e34769..2000f12 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2295,13 +2295,9 @@ int xc_disable_turbo(xc_interface *xch, int cpuid);
  * tmem operations
  */
 
-struct tmem_oid {
-    uint64_t oid[3];
-};
-
 int xc_tmem_control_oid(xc_interface *xch, int32_t pool_id, uint32_t subop,
                         uint32_t cli_id, uint32_t arg1, uint32_t arg2,
-                        struct tmem_oid oid, void *buf);
+                        struct xen_tmem_oid oid, void *buf);
 int xc_tmem_control(xc_interface *xch,
                     int32_t pool_id, uint32_t subop, uint32_t cli_id,
                     uint32_t arg1, uint32_t arg2, void *buf);
diff --git a/tools/libxc/xc_tmem.c b/tools/libxc/xc_tmem.c
index 505595b..558d2ea 100644
--- a/tools/libxc/xc_tmem.c
+++ b/tools/libxc/xc_tmem.c
@@ -64,9 +64,9 @@ int xc_tmem_control(xc_interface *xch,
     sysctl.u.tmem_op.arg1 = arg1;
     sysctl.u.tmem_op.arg2 = arg2;
     sysctl.u.tmem_op.pad = 0;
-    sysctl.u.tmem_op.oid[0] = 0;
-    sysctl.u.tmem_op.oid[1] = 0;
-    sysctl.u.tmem_op.oid[2] = 0;
+    sysctl.u.tmem_op.oid.oid[0] = 0;
+    sysctl.u.tmem_op.oid.oid[1] = 0;
+    sysctl.u.tmem_op.oid.oid[2] = 0;
 
     if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
     {
@@ -98,7 +98,7 @@ int xc_tmem_control_oid(xc_interface *xch,
                         uint32_t cli_id,
                         uint32_t arg1,
                         uint32_t arg2,
-                        struct tmem_oid oid,
+                        struct xen_tmem_oid oid,
                         void *buf)
 {
     DECLARE_SYSCTL;
@@ -112,9 +112,7 @@ int xc_tmem_control_oid(xc_interface *xch,
     sysctl.u.tmem_op.arg1 = arg1;
     sysctl.u.tmem_op.arg2 = arg2;
     sysctl.u.tmem_op.pad = 0;
-    sysctl.u.tmem_op.oid[0] = oid.oid[0];
-    sysctl.u.tmem_op.oid[1] = oid.oid[1];
-    sysctl.u.tmem_op.oid[2] = oid.oid[2];
+    sysctl.u.tmem_op.oid = oid;
 
     if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
     {
@@ -453,7 +451,7 @@ int xc_tmem_restore(xc_interface *xch, int dom, int io_fd)
         }
         for ( j = n_pages; j > 0; j-- )
         {
-            struct tmem_oid oid;
+            struct xen_tmem_oid oid;
             uint32_t index;
             int rc;
             if ( read_exact(io_fd, &oid, sizeof(oid)) )
@@ -487,7 +485,7 @@ int xc_tmem_restore(xc_interface *xch, int dom, int io_fd)
 int xc_tmem_restore_extra(xc_interface *xch, int dom, int io_fd)
 {
     uint32_t pool_id;
-    struct tmem_oid oid;
+    struct xen_tmem_oid oid;
     uint32_t index;
     int count = 0;
     int checksum = 0;
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index c73add5..5260c6c 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -125,12 +125,8 @@ struct tmem_pool {
 #define is_persistent(_p)  (_p->persistent)
 #define is_shared(_p)      (_p->shared)
 
-struct oid {
-    uint64_t oid[3];
-};
-
 struct tmem_object_root {
-    struct oid oid;
+    struct xen_tmem_oid oid;
     struct rb_node rb_tree_node; /* protected by pool->pool_rwlock */
     unsigned long objnode_count; /* atomicity depends on obj_spinlock */
     long pgp_count; /* atomicity depends on obj_spinlock */
@@ -158,7 +154,7 @@ struct tmem_page_descriptor {
             };
             struct tmem_object_root *obj;
         } us;
-        struct oid inv_oid;  /* used for invalid list only */
+        struct xen_tmem_oid inv_oid;  /* used for invalid list only */
     };
     pagesize_t size; /* 0 == PAGE_SIZE (pfp), -1 == data invalid,
                     else compressed data (cdata) */
@@ -815,7 +811,8 @@ static void rtn_free(struct radix_tree_node *rtn, void *arg)
 
 /************ POOL OBJECT COLLECTION MANIPULATION ROUTINES *******************/
 
-static int oid_compare(struct oid *left, struct oid *right)
+static int oid_compare(struct xen_tmem_oid *left,
+                       struct xen_tmem_oid *right)
 {
     if ( left->oid[2] == right->oid[2] )
     {
@@ -839,19 +836,20 @@ static int oid_compare(struct oid *left, struct oid *right)
         return 1;
 }
 
-static void oid_set_invalid(struct oid *oidp)
+static void oid_set_invalid(struct xen_tmem_oid *oidp)
 {
     oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
 }
 
-static unsigned oid_hash(struct oid *oidp)
+static unsigned oid_hash(struct xen_tmem_oid *oidp)
 {
     return (tmem_hash(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
                      BITS_PER_LONG) & OBJ_HASH_BUCKETS_MASK);
 }
 
 /* searches for object==oid in pool, returns locked object if found */
-static struct tmem_object_root * obj_find(struct tmem_pool *pool, struct oid *oidp)
+static struct tmem_object_root * obj_find(struct tmem_pool *pool,
+                                          struct xen_tmem_oid *oidp)
 {
     struct rb_node *node;
     struct tmem_object_root *obj;
@@ -887,7 +885,7 @@ restart_find:
 static void obj_free(struct tmem_object_root *obj)
 {
     struct tmem_pool *pool;
-    struct oid old_oid;
+    struct xen_tmem_oid old_oid;
 
     ASSERT_SPINLOCK(&obj->obj_spinlock);
     ASSERT(obj != NULL);
@@ -946,7 +944,8 @@ static int obj_rb_insert(struct rb_root *root, struct tmem_object_root *obj)
  * allocate, initialize, and insert an tmem_object_root
  * (should be called only if find failed)
  */
-static struct tmem_object_root * obj_alloc(struct tmem_pool *pool, struct oid *oidp)
+static struct tmem_object_root * obj_alloc(struct tmem_pool *pool,
+                                           struct xen_tmem_oid *oidp)
 {
     struct tmem_object_root *obj;
 
@@ -1531,8 +1530,8 @@ cleanup:
 }
 
 static int do_tmem_put(struct tmem_pool *pool,
-              struct oid *oidp, uint32_t index,
-              xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
+                       struct xen_tmem_oid *oidp, uint32_t index,
+                       xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
 {
     struct tmem_object_root *obj = NULL;
     struct tmem_page_descriptor *pgp = NULL;
@@ -1696,8 +1695,9 @@ unlock_obj:
     return ret;
 }
 
-static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
-              xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
+static int do_tmem_get(struct tmem_pool *pool,
+                       struct xen_tmem_oid *oidp, uint32_t index,
+                       xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
 {
     struct tmem_object_root *obj;
     struct tmem_page_descriptor *pgp;
@@ -1774,7 +1774,8 @@ bad_copy:
     return rc;
 }
 
-static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t index)
+static int do_tmem_flush_page(struct tmem_pool *pool,
+                              struct xen_tmem_oid *oidp, uint32_t index)
 {
     struct tmem_object_root *obj;
     struct tmem_page_descriptor *pgp;
@@ -1807,7 +1808,8 @@ out:
         return 1;
 }
 
-static int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp)
+static int do_tmem_flush_object(struct tmem_pool *pool,
+                                struct xen_tmem_oid *oidp)
 {
     struct tmem_object_root *obj;
 
@@ -2432,7 +2434,7 @@ static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
     struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
     struct tmem_page_descriptor *pgp;
-    struct oid oid;
+    struct xen_tmem_oid oid;
     int ret = 0;
     struct tmem_handle h;
 
@@ -2525,8 +2527,10 @@ out:
     return ret;
 }
 
-static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, struct oid *oidp,
-                      uint32_t index, tmem_cli_va_param_t buf, uint32_t bufsize)
+static int tmemc_restore_put_page(int cli_id, uint32_t pool_id,
+                                  struct xen_tmem_oid *oidp,
+                                  uint32_t index, tmem_cli_va_param_t buf,
+                                  uint32_t bufsize)
 {
     struct client *client = tmem_client_from_cli_id(cli_id);
     struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
@@ -2542,8 +2546,9 @@ static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, struct oid *oidp
     return do_tmem_put(pool, oidp, index, 0, buf);
 }
 
-static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id, struct oid *oidp,
-                        uint32_t index)
+static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id,
+                                    struct xen_tmem_oid *oidp,
+                                    uint32_t index)
 {
     struct client *client = tmem_client_from_cli_id(cli_id);
     struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
@@ -2559,7 +2564,7 @@ int tmem_control(struct xen_sysctl_tmem_op *op)
     int ret;
     uint32_t pool_id = op->pool_id;
     uint32_t cmd = op->cmd;
-    struct oid *oidp = (struct oid *)(&op->oid[0]);
+    struct xen_tmem_oid *oidp = &op->oid;
 
     if ( op->pad != 0 )
         return -EINVAL;
@@ -2633,7 +2638,7 @@ long do_tmem_op(tmem_cli_op_t uops)
     struct tmem_op op;
     struct client *client = current->domain->tmem_client;
     struct tmem_pool *pool = NULL;
-    struct oid *oidp;
+    struct xen_tmem_oid *oidp;
     int rc = 0;
     bool_t succ_get = 0, succ_put = 0;
     bool_t non_succ_get = 0, non_succ_put = 0;
@@ -2714,7 +2719,7 @@ long do_tmem_op(tmem_cli_op_t uops)
             write_unlock(&tmem_rwlock);
             read_lock(&tmem_rwlock);
 
-            oidp = (struct oid *)&op.u.gen.oid[0];
+            oidp = container_of(&op.u.gen.oid[0], struct xen_tmem_oid, oid[0]);
             switch ( op.cmd )
             {
             case TMEM_NEW_POOL:
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index fc4709a..2d7580b 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -737,6 +737,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
 #define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE       32
 #define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE     33
 
+struct xen_tmem_oid {
+    uint64_t oid[3];
+};
+typedef struct xen_tmem_oid xen_tmem_oid_t;
+DEFINE_XEN_GUEST_HANDLE(xen_tmem_oid_t);
+
 struct xen_sysctl_tmem_op {
     uint32_t cmd;       /* IN: XEN_SYSCTL_TMEM_OP_* . */
     int32_t pool_id;    /* IN: 0 by default unless _SAVE_*, RESTORE_* .*/
@@ -746,7 +752,7 @@ struct xen_sysctl_tmem_op {
     uint32_t arg1;      /* IN: If not applicable to command use 0. */
     uint32_t arg2;      /* IN: If not applicable to command use 0. */
     uint32_t pad;       /* Padding so structure is the same under 32 and 64. */
-    uint64_t oid[3];    /* IN: If not applicable to command use 0s. */
+    xen_tmem_oid_t oid; /* IN: If not applicable to command use 0s. */
     XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save and restore ops. */
 };
 typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
-- 
2.1.0
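
A note on the do_tmem_op hunk above: the guest-facing public/tmem.h still
carries the raw uint64_t oid[3] at this point in the series, so the patch
uses container_of() to view that array as a struct xen_tmem_oid instead of
an open-coded cast. A minimal standalone sketch of the idiom (the
tmem_op_gen structure below is an abbreviated, hypothetical stand-in for
the real tmem_op union member):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified container_of(): recover the enclosing structure from a
 * pointer to one of its members (Xen's version also type-checks the
 * pointer with typeof()). */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct xen_tmem_oid {
    uint64_t oid[3];
};

/* Abbreviated stand-in for the guest-facing structure, which still has a
 * bare uint64_t oid[3] until the follow-up patch converts it. */
struct tmem_op_gen {
    uint64_t oid[3];
    uint32_t index;
};

int main(void)
{
    struct tmem_op_gen gen = { .oid = { 1, 2, 3 } };

    /* offsetof(struct xen_tmem_oid, oid[0]) is 0, so this simply yields a
     * pointer to the same bytes, typed as the new structure. */
    struct xen_tmem_oid *oidp =
        container_of(&gen.oid[0], struct xen_tmem_oid, oid[0]);

    printf("%llu %llu %llu\n",
           (unsigned long long)oidp->oid[0],
           (unsigned long long)oidp->oid[1],
           (unsigned long long)oidp->oid[2]);
    return 0;
}

Both structures begin with the same uint64_t[3], so the conversion is
layout-safe; once the later patch switches tmem.h over to xen_tmem_oid the
container_of() becomes unnecessary.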

> 
> Jan
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

