
[Xen-devel] [PATCH v1 05/12] tmem: Delete deduplication (and tze) code.



A couple of reasons:
 - It can lead to security issues (see the row-hammer and
   KSM-style deduplication attacks).
 - The code is quite complex.
 - Deduplication only pays off when the pages really are identical,
   and that is hardly guaranteed.
 - We get some gains (when pages are deduplicated), but at the cost
   of making the code less maintainable.
 - tze depends on the deduplication code.

As such, delete it.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
v1: First submission.
---
 docs/misc/xen-command-line.markdown |   6 -
 xen/common/tmem.c                   | 361 +-----------------------------------
 xen/common/tmem_control.c           |   9 -
 xen/common/tmem_xen.c               |  34 ----
 xen/include/xen/tmem_xen.h          | 123 ------------
 5 files changed, 1 insertion(+), 532 deletions(-)

diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index 8ff57fa..cd9534b 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -1500,15 +1500,9 @@ pages) must also be specified via the tbuf\_size parameter.
 ### tmem\_compress
 > `= <boolean>`
 
-### tmem\_dedup
-> `= <boolean>`
-
 ### tmem\_shared\_auth
 > `= <boolean>`
 
-### tmem\_tze
-> `= <integer>`
-
 ### tsc
 > `= unstable | skewed | stable:socket`
 
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index b673120..cf5271c 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -71,14 +71,7 @@ struct tmem_page_descriptor {
     pagesize_t size; /* 0 == PAGE_SIZE (pfp), -1 == data invalid,
                     else compressed data (cdata). */
     uint32_t index;
-#ifdef CONFIG_TMEM_DEDUP
-    /* Must hold pcd_tree_rwlocks[firstbyte] to use pcd pointer/siblings. */
-    uint16_t firstbyte; /* NON_SHAREABLE->pfp  otherwise->pcd. */
-#endif
     bool_t eviction_attempted;  /* CHANGE TO lifetimes? (settable). */
-#ifdef CONFIG_TMEM_DEDUP
-    struct list_head pcd_siblings;
-#endif
     union {
         struct page_info *pfp;  /* Page frame pointer. */
         char *cdata; /* Compressed data. */
@@ -96,25 +89,12 @@ struct tmem_page_content_descriptor {
     union {
         struct page_info *pfp;  /* Page frame pointer. */
         char *cdata; /* If compression_enabled. */
-#ifdef CONFIG_TMEM_TZE
-        char *tze; /* If !compression_enabled, trailing zeroes eliminated. */
-#endif
     };
-#ifdef CONFIG_TMEM_DEDUP
-    struct list_head pgp_list;
-    struct rb_node pcd_rb_tree_node;
-    uint32_t pgp_ref_count;
-#endif
     pagesize_t size; /* If compression_enabled -> 0<size<PAGE_SIZE (*cdata)
                      * else if tze, 0<=size<PAGE_SIZE, rounded up to mult of 8
                      * else PAGE_SIZE -> *pfp. */
 };
 
-#ifdef CONFIG_TMEM_DEDUP
-struct rb_root pcd_tree_roots[256]; /* Choose based on first byte of page. */
-rwlock_t pcd_tree_rwlocks[256]; /* Poor man's concurrency for now. */
-#endif
-
 static int tmem_initialized = 0;
 
 struct xmem_pool *tmem_mempool = 0;
@@ -273,232 +253,6 @@ static void tmem_persistent_pool_page_put(void *page_va)
  */
 #define NOT_SHAREABLE ((uint16_t)-1UL)
 
-#ifdef CONFIG_TMEM_DEDUP
-static int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp)
-{
-    uint8_t firstbyte = pgp->firstbyte;
-    struct tmem_page_content_descriptor *pcd;
-    int ret;
-
-    ASSERT(tmem_dedup_enabled());
-    read_lock(&pcd_tree_rwlocks[firstbyte]);
-    pcd = pgp->pcd;
-    if ( pgp->size < PAGE_SIZE && pgp->size != 0 &&
-         pcd->size < PAGE_SIZE && pcd->size != 0 )
-        ret = tmem_decompress_to_client(cmfn, pcd->cdata, pcd->size,
-                                       tmem_cli_buf_null);
-    else if ( tmem_tze_enabled() && pcd->size < PAGE_SIZE )
-        ret = tmem_copy_tze_to_client(cmfn, pcd->tze, pcd->size);
-    else
-        ret = tmem_copy_to_client(cmfn, pcd->pfp, tmem_cli_buf_null);
-    read_unlock(&pcd_tree_rwlocks[firstbyte]);
-    return ret;
-}
-
-/*
- * Ensure pgp no longer points to pcd, nor vice-versa.
- * Take pcd rwlock unless have_pcd_rwlock is set, always unlock when done.
- */
-static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool *pool, bool_t have_pcd_rwlock)
-{
-    struct tmem_page_content_descriptor *pcd = pgp->pcd;
-    struct page_info *pfp = pgp->pcd->pfp;
-    uint16_t firstbyte = pgp->firstbyte;
-    char *pcd_tze = pgp->pcd->tze;
-    pagesize_t pcd_size = pcd->size;
-    pagesize_t pgp_size = pgp->size;
-    char *pcd_cdata = pgp->pcd->cdata;
-    pagesize_t pcd_csize = pgp->pcd->size;
-
-    ASSERT(tmem_dedup_enabled());
-    ASSERT(firstbyte != NOT_SHAREABLE);
-    ASSERT(firstbyte < 256);
-
-    if ( have_pcd_rwlock )
-        ASSERT_WRITELOCK(&pcd_tree_rwlocks[firstbyte]);
-    else
-        write_lock(&pcd_tree_rwlocks[firstbyte]);
-    list_del_init(&pgp->pcd_siblings);
-    pgp->pcd = NULL;
-    pgp->firstbyte = NOT_SHAREABLE;
-    pgp->size = -1;
-    if ( --pcd->pgp_ref_count )
-    {
-        write_unlock(&pcd_tree_rwlocks[firstbyte]);
-        return;
-    }
-
-    /* No more references to this pcd, recycle it and the physical page. */
-    ASSERT(list_empty(&pcd->pgp_list));
-    pcd->pfp = NULL;
-    /* Remove pcd from rbtree. */
-    rb_erase(&pcd->pcd_rb_tree_node,&pcd_tree_roots[firstbyte]);
-    /* Reinit the struct for safety for now. */
-    RB_CLEAR_NODE(&pcd->pcd_rb_tree_node);
-    /* Now free up the pcd memory. */
-    tmem_free(pcd, NULL);
-    atomic_dec_and_assert(global_pcd_count);
-    if ( pgp_size != 0 && pcd_size < PAGE_SIZE )
-    {
-        /* Compressed data. */
-        tmem_free(pcd_cdata, pool);
-        tmem_stats.pcd_tot_csize -= pcd_csize;
-    }
-    else if ( pcd_size != PAGE_SIZE )
-    {
-        /* Trailing zero data. */
-        tmem_stats.pcd_tot_tze_size -= pcd_size;
-        if ( pcd_size )
-            tmem_free(pcd_tze, pool);
-    } else {
-        /* Real physical page. */
-        if ( tmem_tze_enabled() )
-            tmem_stats.pcd_tot_tze_size -= PAGE_SIZE;
-        if ( tmem_compression_enabled() )
-            tmem_stats.pcd_tot_csize -= PAGE_SIZE;
-        tmem_free_page(pool,pfp);
-    }
-    write_unlock(&pcd_tree_rwlocks[firstbyte]);
-}
-
-
-static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize_t csize)
-{
-    struct rb_node **new, *parent = NULL;
-    struct rb_root *root;
-    struct tmem_page_content_descriptor *pcd;
-    int cmp;
-    pagesize_t pfp_size = 0;
-    uint8_t firstbyte = (cdata == NULL) ? tmem_get_first_byte(pgp->pfp) : *cdata;
-    int ret = 0;
-
-    if ( !tmem_dedup_enabled() )
-        return 0;
-    ASSERT(pgp->us.obj != NULL);
-    ASSERT(pgp->us.obj->pool != NULL);
-    ASSERT(!pgp->us.obj->pool->persistent);
-    if ( cdata == NULL )
-    {
-        ASSERT(pgp->pfp != NULL);
-        pfp_size = PAGE_SIZE;
-        if ( tmem_tze_enabled() )
-        {
-            pfp_size = tmem_tze_pfp_scan(pgp->pfp);
-            if ( pfp_size > PCD_TZE_MAX_SIZE )
-                pfp_size = PAGE_SIZE;
-        }
-        ASSERT(pfp_size <= PAGE_SIZE);
-        ASSERT(!(pfp_size & (sizeof(uint64_t)-1)));
-    }
-    write_lock(&pcd_tree_rwlocks[firstbyte]);
-
-    /* Look for page match. */
-    root = &pcd_tree_roots[firstbyte];
-    new = &(root->rb_node);
-    while ( *new )
-    {
-        pcd = container_of(*new, struct tmem_page_content_descriptor, pcd_rb_tree_node);
-        parent = *new;
-        /* Compare new entry and rbtree entry, set cmp accordingly. */
-        if ( cdata != NULL )
-        {
-            if ( pcd->size < PAGE_SIZE )
-                /* Both new entry and rbtree entry are compressed. */
-                cmp = tmem_pcd_cmp(cdata,csize,pcd->cdata,pcd->size);
-            else
-                /* New entry is compressed, rbtree entry is not. */
-                cmp = -1;
-        } else if ( pcd->size < PAGE_SIZE )
-            /* Rbtree entry is compressed, rbtree entry is not. */
-            cmp = 1;
-        else if ( tmem_tze_enabled() ) {
-            if ( pcd->size < PAGE_SIZE )
-                /* Both new entry and rbtree entry are trailing zero. */
-                cmp = tmem_tze_pfp_cmp(pgp->pfp,pfp_size,pcd->tze,pcd->size);
-            else
-                /* New entry is trailing zero, rbtree entry is not. */
-                cmp = tmem_tze_pfp_cmp(pgp->pfp,pfp_size,pcd->pfp,PAGE_SIZE);
-        } else  {
-            /* Both new entry and rbtree entry are full physical pages. */
-            ASSERT(pgp->pfp != NULL);
-            ASSERT(pcd->pfp != NULL);
-            cmp = tmem_page_cmp(pgp->pfp,pcd->pfp);
-        }
-
-        /* Walk tree or match depending on cmp. */
-        if ( cmp < 0 )
-            new = &((*new)->rb_left);
-        else if ( cmp > 0 )
-            new = &((*new)->rb_right);
-        else
-        {
-            /*
-             * Match! if not compressed, free the no-longer-needed page
-             * but if compressed, data is assumed static so don't free!
-             */
-            if ( cdata == NULL )
-                tmem_free_page(pgp->us.obj->pool,pgp->pfp);
-            tmem_stats.deduped_puts++;
-            goto match;
-        }
-    }
-
-    /* Exited while loop with no match, so alloc a pcd and put it in the tree. */
-    if ( (pcd = tmem_malloc(sizeof(struct tmem_page_content_descriptor), NULL)) == NULL )
-    {
-        ret = -ENOMEM;
-        goto unlock;
-    } else if ( cdata != NULL ) {
-        if ( (pcd->cdata = tmem_malloc(csize,pgp->us.obj->pool)) == NULL )
-        {
-            tmem_free(pcd, NULL);
-            ret = -ENOMEM;
-            goto unlock;
-        }
-    }
-    atomic_inc_and_max(global_pcd_count);
-    RB_CLEAR_NODE(&pcd->pcd_rb_tree_node);  /* Is this necessary? */
-    INIT_LIST_HEAD(&pcd->pgp_list);  /* Is this necessary? */
-    pcd->pgp_ref_count = 0;
-    if ( cdata != NULL )
-    {
-        memcpy(pcd->cdata,cdata,csize);
-        pcd->size = csize;
-        tmem_stats.pcd_tot_csize += csize;
-    } else if ( pfp_size == 0 ) {
-        ASSERT(tmem_tze_enabled());
-        pcd->size = 0;
-        pcd->tze = NULL;
-    } else if ( pfp_size < PAGE_SIZE &&
-         ((pcd->tze = tmem_malloc(pfp_size,pgp->us.obj->pool)) != NULL) ) {
-        tmem_tze_copy_from_pfp(pcd->tze,pgp->pfp,pfp_size);
-        pcd->size = pfp_size;
-        tmem_stats.pcd_tot_tze_size += pfp_size;
-        tmem_free_page(pgp->us.obj->pool,pgp->pfp);
-    } else {
-        pcd->pfp = pgp->pfp;
-        pcd->size = PAGE_SIZE;
-        if ( tmem_tze_enabled() )
-            tmem_stats.pcd_tot_tze_size += PAGE_SIZE;
-        if ( tmem_compression_enabled() )
-            tmem_stats.pcd_tot_csize += PAGE_SIZE;
-    }
-    rb_link_node(&pcd->pcd_rb_tree_node, parent, new);
-    rb_insert_color(&pcd->pcd_rb_tree_node, root);
-
-match:
-    pcd->pgp_ref_count++;
-    list_add(&pgp->pcd_siblings,&pcd->pgp_list);
-    pgp->firstbyte = firstbyte;
-    pgp->eviction_attempted = 0;
-    pgp->pcd = pcd;
-
-unlock:
-    write_unlock(&pcd_tree_rwlocks[firstbyte]);
-    return ret;
-}
-#endif
-
 /************ PAGE DESCRIPTOR MANIPULATION ROUTINES *******************/
 
 /* Allocate a struct tmem_page_descriptor and associate it with an object. */
@@ -516,14 +270,6 @@ static struct tmem_page_descriptor *pgp_alloc(struct tmem_object_root *obj)
     INIT_LIST_HEAD(&pgp->global_eph_pages);
     INIT_LIST_HEAD(&pgp->us.client_eph_pages);
     pgp->pfp = NULL;
-#ifdef CONFIG_TMEM_DEDUP
-    if ( tmem_dedup_enabled() )
-    {
-        pgp->firstbyte = NOT_SHAREABLE;
-        pgp->eviction_attempted = 0;
-        INIT_LIST_HEAD(&pgp->pcd_siblings);
-    }
-#endif
     pgp->size = -1;
     pgp->index = -1;
     pgp->timestamp = get_cycles();
@@ -548,11 +294,6 @@ static void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *po
 
     if ( pgp->pfp == NULL )
         return;
-#ifdef CONFIG_TMEM_DEDUP
-    if ( tmem_dedup_enabled() && pgp->firstbyte != NOT_SHAREABLE )
-        pcd_disassociate(pgp,pool,0); /* pgp->size lost. */
-    else
-#endif
     if ( pgp_size )
         tmem_free(pgp->cdata, pool);
     else
@@ -1103,8 +844,6 @@ static struct client *client_create(domid_t cli_id)
 
     client->cli_id = cli_id;
     client->compress = tmem_compression_enabled();
-#ifdef CONFIG_TMEM_DEDUP
-#endif
     client->shared_auth_required = tmem_shared_auth();
     for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
         client->shared_auth_uuid[i][0] =
@@ -1161,35 +900,11 @@ static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *ho
 {
     struct tmem_object_root *obj = pgp->us.obj;
     struct tmem_pool *pool = obj->pool;
-#ifdef CONFIG_TMEM_DEDUP
-    struct client *client = pool->client;
-    uint16_t firstbyte = pgp->firstbyte;
-#endif
 
     if ( pool->is_dying )
         return 0;
     if ( spin_trylock(&obj->obj_spinlock) )
     {
-#ifdef CONFIG_TMEM_DEDUP
-        if ( tmem_dedup_enabled() )
-        {
-            firstbyte = pgp->firstbyte;
-            if ( firstbyte ==  NOT_SHAREABLE )
-                goto obj_unlock;
-            ASSERT(firstbyte < 256);
-            if ( !write_trylock(&pcd_tree_rwlocks[firstbyte]) )
-                goto obj_unlock;
-            if ( pgp->pcd->pgp_ref_count > 1 && !pgp->eviction_attempted )
-            {
-                pgp->eviction_attempted++;
-                list_del(&pgp->global_eph_pages);
-                list_add_tail(&pgp->global_eph_pages,&tmem_global.ephemeral_page_list);
-                list_del(&pgp->us.client_eph_pages);
-                list_add_tail(&pgp->us.client_eph_pages,&client->ephemeral_page_list);
-                goto pcd_unlock;
-            }
-        }
-#endif
         if ( obj->pgp_count > 1 )
             return 1;
         if ( write_trylock(&pool->pool_rwlock) )
@@ -1197,12 +912,6 @@ static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *ho
             *hold_pool_rwlock = 1;
             return 1;
         }
-#ifdef CONFIG_TMEM_DEDUP
-pcd_unlock:
-        if ( tmem_dedup_enabled() )
-            write_unlock(&pcd_tree_rwlocks[firstbyte]);
-obj_unlock:
-#endif
         spin_unlock(&obj->obj_spinlock);
     }
     return 0;
@@ -1258,13 +967,6 @@ found:
     ASSERT_SPINLOCK(&obj->obj_spinlock);
     pgp_del = pgp_delete_from_obj(obj, pgp->index);
     ASSERT(pgp_del == pgp);
-#ifdef CONFIG_TMEM_DEDUP
-    if ( tmem_dedup_enabled() && pgp->firstbyte != NOT_SHAREABLE )
-    {
-        ASSERT(pgp->pcd->pgp_ref_count == 1 || pgp->eviction_attempted);
-        pcd_disassociate(pgp,pool,1);
-    }
-#endif
 
     /* pgp already delist, so call pgp_free directly. */
     pgp_free(pgp);
@@ -1331,11 +1033,6 @@ static int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn
     else if ( (size == 0) || (size >= tmem_mempool_maxalloc) ) {
         ret = 0;
         goto out;
-#ifdef CONFIG_TMEM_DEDUP
-    } else if ( tmem_dedup_enabled() && !is_persistent(pgp->us.obj->pool) ) {
-        if ( (ret = pcd_associate(pgp,dst,size)) == -ENOMEM )
-            goto out;
-#endif
     } else if ( (p = tmem_malloc(size,pgp->us.obj->pool)) == NULL ) {
         ret = -ENOMEM;
         goto out;
@@ -1395,13 +1092,6 @@ copy_uncompressed:
     ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_cli_buf_null);
     if ( ret < 0 )
         goto bad_copy;
-#ifdef CONFIG_PAGE_DEDUP
-    if ( tmem_dedup_enabled() && !is_persistent(pool) )
-    {
-        if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
-            goto failed_dup;
-    }
-#endif
 
 done:
     /* Successfully replaced data, clean up and return success. */
@@ -1538,17 +1228,6 @@ copy_uncompressed:
     if ( ret < 0 )
         goto bad_copy;
 
-#ifdef CONFIG_TMEM_DEDUP
-    if ( tmem_dedup_enabled() && !is_persistent(pool) )
-    {
-        if ( pcd_associate(pgp, NULL, 0) == -ENOMEM )
-        {
-            ret = -ENOMEM;
-            goto del_pgp_from_obj;
-        }
-    }
-#endif
-
 insert_page:
     if ( !is_persistent(pool) )
     {
@@ -1635,12 +1314,6 @@ static int do_tmem_get(struct tmem_pool *pool,
         return 0;
     }
     ASSERT(pgp->size != -1);
-#ifdef CONFIG_TMEM_DEDUP
-    if ( tmem_dedup_enabled() && !is_persistent(pool) &&
-              pgp->firstbyte != NOT_SHAREABLE )
-        rc = pcd_copy_to_client(cmfn, pgp);
-    else
-#endif
     if ( pgp->size != 0 )
     {
         rc = tmem_decompress_to_client(cmfn, pgp->cdata, pgp->size, clibuf);
@@ -2402,44 +2075,12 @@ static int __init init_tmem(void)
     if ( !tmem_enabled() )
         return 0;
 
-#ifdef CONFIG_TMEM_DEDUP
-    if ( tmem_dedup_enabled() )
-    {
-        unsigned int i;
-
-        for (i = 0; i < 256; i++ )
-        {
-            pcd_tree_roots[i] = RB_ROOT;
-            rwlock_init(&pcd_tree_rwlocks[i]);
-        }
-    }
-#endif
     if ( !tmem_mempool_init() )
         return 0;
 
     if ( tmem_init() )
     {
-        printk("tmem: initialized comp=%d dedup=%d tze=%d\n",
-               tmem_compression_enabled(),
-#ifdef CONFIG_TMEM_DEDUP
-               tmem_dedup_enabled(),
-#else
-               0,
-#endif
-#ifdef CONFIG_TMEM_TZE
-               tmem_tze_enabled()
-#else
-               0
-#endif
-            );
-
-#if defined(CONFIG_TMEM_DEDUP) && defined(CONFIG_TMEM_TZE)
-        if ( tmem_dedup_enabled()&&tmem_compression_enabled()&&tmem_tze_enabled() )
-        {
-            tmem_tze_disable();
-            printk("tmem: tze and compression not compatible, disabling 
tze\n");
-        }
-#endif
+        printk("tmem: initialized comp=%d\n", tmem_compression_enabled());
         tmem_initialized = 1;
     }
     else
diff --git a/xen/common/tmem_control.c b/xen/common/tmem_control.c
index 565e50b..cda327b 100644
--- a/xen/common/tmem_control.c
+++ b/xen/common/tmem_control.c
@@ -274,15 +274,6 @@ static int __tmemc_set_var(struct client *client, uint32_t subop, uint32_t arg1)
         atomic_add(client->weight,&tmem_global.client_weight_total);
         break;
     case XEN_SYSCTL_TMEM_OP_SET_COMPRESS:
-#ifdef CONFIG_TMEM_DEDUP
-        if ( tmem_dedup_enabled() )
-        {
-            tmem_client_warn("tmem: compression %s for all %ss, cannot be changed when tmem_dedup is enabled\n",
-                            tmem_compression_enabled() ? "enabled" : "disabled",
-                            tmem_client_str);
-            return -1;
-        }
-#endif
         client->compress = arg1 ? 1 : 0;
         tmem_client_info("tmem: compression %s for %s=%d\n",
             arg1 ? "enabled" : "disabled",tmem_cli_id_str,cli_id);
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index d42deef..84ae7fd 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -20,16 +20,6 @@ boolean_param("tmem", opt_tmem);
 bool_t __read_mostly opt_tmem_compress = 0;
 boolean_param("tmem_compress", opt_tmem_compress);
 
-#ifdef CONFIG_TMEM_DEDUP
-bool_t __read_mostly opt_tmem_dedup = 0;
-boolean_param("tmem_dedup", opt_tmem_dedup);
-#endif
-
-#ifdef CONFIG_TMEM_TZE
-bool_t __read_mostly opt_tmem_tze = 0;
-boolean_param("tmem_tze", opt_tmem_tze);
-#endif
-
 bool_t __read_mostly opt_tmem_shared_auth = 0;
 boolean_param("tmem_shared_auth", opt_tmem_shared_auth);
 
@@ -220,30 +210,6 @@ int tmem_decompress_to_client(xen_pfn_t cmfn, void *tmem_va,
     return 1;
 }
 
-#ifdef CONFIG_TMEM_TZE
-int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va,
-                                    pagesize_t len)
-{
-    void *cli_va;
-    unsigned long cli_mfn;
-    struct page_info *cli_pfp = NULL;
-
-    ASSERT(!(len & (sizeof(uint64_t)-1)));
-    ASSERT(len <= PAGE_SIZE);
-    ASSERT(len > 0 || tmem_va == NULL);
-    cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 1);
-    if ( cli_va == NULL )
-        return -EFAULT;
-    if ( len > 0 )
-        memcpy((char *)cli_va,(char *)tmem_va,len);
-    if ( len < PAGE_SIZE )
-        memset((char *)cli_va+len,0,PAGE_SIZE-len);
-    cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
-    smp_mb();
-    return 1;
-}
-#endif
-
 /******************  XEN-SPECIFIC HOST INITIALIZATION ********************/
 static int dstmem_order, workmem_order;
 
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 3be8001..7a1bb03 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -41,27 +41,6 @@ static inline bool_t tmem_compression_enabled(void)
     return opt_tmem_compress;
 }
 
-#ifdef CONFIG_TMEM_DEDUP
-extern bool_t opt_tmem_dedup;
-static inline bool_t tmem_dedup_enabled(void)
-{
-    return opt_tmem_dedup;
-}
-#endif
-
-#ifdef CONFIG_TMEM_TZE
-extern bool_t opt_tmem_tze;
-static inline bool_t tmem_tze_enabled(void)
-{
-    return opt_tmem_tze;
-}
-
-static inline void tmem_tze_disable(void)
-{
-    opt_tmem_tze = 0;
-}
-#endif
-
 extern bool_t opt_tmem_shared_auth;
 static inline bool_t tmem_shared_auth(void)
 {
@@ -196,105 +175,6 @@ static inline struct client *tmem_client_from_cli_id(domid_t cli_id)
     return c;
 }
 
-#ifdef CONFIG_TMEM_DEDUP
-static inline uint8_t tmem_get_first_byte(struct page_info *pfp)
-{
-    const uint8_t *p = __map_domain_page(pfp);
-    uint8_t byte = p[0];
-
-    unmap_domain_page(p);
-
-    return byte;
-}
-
-static inline int tmem_page_cmp(struct page_info *pfp1, struct page_info *pfp2)
-{
-    const uint64_t *p1 = __map_domain_page(pfp1);
-    const uint64_t *p2 = __map_domain_page(pfp2);
-    int rc = memcmp(p1, p2, PAGE_SIZE);
-
-    unmap_domain_page(p2);
-    unmap_domain_page(p1);
-
-    return rc;
-}
-
-static inline int tmem_pcd_cmp(void *va1, pagesize_t len1, void *va2, pagesize_t len2)
-{
-    const char *p1 = (char *)va1;
-    const char *p2 = (char *)va2;
-    pagesize_t i;
-
-    ASSERT(len1 <= PAGE_SIZE);
-    ASSERT(len2 <= PAGE_SIZE);
-    if ( len1 < len2 )
-        return -1;
-    if ( len1 > len2 )
-        return 1;
-    ASSERT(len1 == len2);
-    for ( i = len2; i && *p1 == *p2; i--, p1++, p2++ );
-    if ( !i )
-        return 0;
-    if ( *p1 < *p2 )
-        return -1;
-    return 1;
-}
-
-static inline int tmem_tze_pfp_cmp(struct page_info *pfp1, pagesize_t pfp_len,
-                                   void *tva, const pagesize_t tze_len)
-{
-    const uint64_t *p1 = __map_domain_page(pfp1);
-    const uint64_t *p2 = tze_len == PAGE_SIZE ?
-        __map_domain_page((struct page_info *)tva) : tva;
-    int rc;
-
-    ASSERT(pfp_len <= PAGE_SIZE);
-    ASSERT(!(pfp_len & (sizeof(uint64_t)-1)));
-    ASSERT(tze_len <= PAGE_SIZE);
-    ASSERT(!(tze_len & (sizeof(uint64_t)-1)));
-    if ( pfp_len < tze_len )
-        rc = -1;
-    else if ( pfp_len > tze_len )
-        rc = 1;
-    else
-        rc = memcmp(p1, p2, tze_len);
-
-    if ( tze_len == PAGE_SIZE )
-        unmap_domain_page(p2);
-    unmap_domain_page(p1);
-
-    return rc;
-}
-
-/* return the size of the data in the pfp, ignoring trailing zeroes and
- * rounded up to the nearest multiple of 8 */
-static inline pagesize_t tmem_tze_pfp_scan(struct page_info *pfp)
-{
-    const uint64_t *const page = __map_domain_page(pfp);
-    const uint64_t *p = page;
-    pagesize_t bytecount = PAGE_SIZE;
-    pagesize_t len = PAGE_SIZE/sizeof(uint64_t);
-
-    p += len;
-    while ( len-- && !*--p )
-        bytecount -= sizeof(uint64_t);
-
-    unmap_domain_page(page);
-
-    return bytecount;
-}
-
-static inline void tmem_tze_copy_from_pfp(void *tva, struct page_info *pfp, pagesize_t len)
-{
-    const uint64_t *p = __map_domain_page(pfp);
-
-    ASSERT(!(len & (sizeof(uint64_t)-1)));
-    memcpy(tva, p, len);
-
-    unmap_domain_page(p);
-}
-#endif
-
 /* these typedefs are in the public/tmem.h interface
 typedef XEN_GUEST_HANDLE(void) cli_mfn_t;
 typedef XEN_GUEST_HANDLE(char) cli_va_t;
@@ -342,9 +222,6 @@ int tmem_compress_from_client(xen_pfn_t, void **, size_t *,
 
 int tmem_copy_from_client(struct page_info *, xen_pfn_t, tmem_cli_va_param_t);
 int tmem_copy_to_client(xen_pfn_t, struct page_info *, tmem_cli_va_param_t);
-#ifdef CONFIG_TMEM_TZE
-extern int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, pagesize_t len);
-#endif
 
 #define tmem_client_err(fmt, args...)  printk(XENLOG_G_ERR fmt, ##args)
 #define tmem_client_warn(fmt, args...) printk(XENLOG_G_WARNING fmt, ##args)
-- 
2.4.11

