[Xen-devel] [PATCH] don't use mlock() with Solaris tools

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH] don't use mlock() with Solaris tools
From: John Levon <levon@xxxxxxxxxxxxxxxxx>
Date: Fri, 20 Oct 2006 04:42:47 +0100
User-agent: Mutt/1.5.9i

# HG changeset patch
# User john.levon@xxxxxxx
# Date 1161315500 25200
# Node ID ad404ee927926b8cc4a6498b180e78f0939eb2eb
# Parent  3685871c6f7930c7d35baea7cee4f51d00415796
On Solaris, mlock() requires a page-aligned address, and even a successful
mlock() does not prevent the pages from taking minor page faults; so don't
use it on Solaris. Instead, route all mlock()/munlock() calls through new
lock_pages()/unlock_pages() helpers, which are no-ops under __sun__.

Signed-off-by: Mark Johnson <mark.johnson@xxxxxxx>
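
For anyone who hasn't hit this: here is a minimal standalone sketch (not
part of the patch) of the alignment failure mode. It locks a small, almost
certainly unaligned stack buffer exactly the way the old libxc code did;
Linux rounds the range out to page boundaries internally, but Solaris
mlock() rejects an unaligned address with EINVAL. Run it with enough
privilege (root, or an adequate RLIMIT_MEMLOCK) so a failure isn't just
EPERM/ENOMEM:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

int main(void)
{
    char buf[64];  /* small stack object, almost certainly not page aligned */

    /* The old libxc code passed addresses like this straight to mlock(). */
    if (mlock(buf, sizeof(buf)) != 0) {
        /* EINVAL on Solaris (unaligned address); Linux aligns the range. */
        printf("mlock failed: %s\n", strerror(errno));
        return 1;
    }
    munlock(buf, sizeof(buf));
    printf("mlock succeeded\n");
    return 0;
}

That, plus the fact that a locked page may still take minor faults on
Solaris, is why the wrappers introduced below compile to nothing under
__sun__.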

diff --git a/tools/libxc/xc_acm.c b/tools/libxc/xc_acm.c
--- a/tools/libxc/xc_acm.c
+++ b/tools/libxc/xc_acm.c
@@ -24,12 +24,12 @@ int xc_acm_op(int xc_handle, int cmd, vo
     hypercall.arg[0] = cmd;
     hypercall.arg[1] = (unsigned long) arg;
 
-    if (mlock(arg, arg_size) != 0) {
-        PERROR("xc_acm_op: arg mlock failed");
+    if (lock_pages(arg, arg_size) != 0) {
+        PERROR("xc_acm_op: arg lock failed");
         goto out;
     }
     ret = do_xen_hypercall(xc_handle, &hypercall);
-    safe_munlock(arg, arg_size);
+    unlock_pages(arg, arg_size);
  out:
     return ret;
 }
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -72,7 +72,7 @@ int xc_domain_shutdown(int xc_handle,
     arg.domain_id = domid;
     arg.reason = reason;
 
-    if ( mlock(&arg, sizeof(arg)) != 0 )
+    if ( lock_pages(&arg, sizeof(arg)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out1;
@@ -80,7 +80,7 @@ int xc_domain_shutdown(int xc_handle,
 
     ret = do_xen_hypercall(xc_handle, &hypercall);
 
-    safe_munlock(&arg, sizeof(arg));
+    unlock_pages(&arg, sizeof(arg));
 
  out1:
     return ret;
@@ -103,7 +103,7 @@ int xc_vcpu_setaffinity(int xc_handle,
                          (uint8_t *)&cpumap);
     domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
     
-    if ( mlock(&cpumap, sizeof(cpumap)) != 0 )
+    if ( lock_pages(&cpumap, sizeof(cpumap)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -111,7 +111,7 @@ int xc_vcpu_setaffinity(int xc_handle,
 
     ret = do_domctl(xc_handle, &domctl);
 
-    safe_munlock(&cpumap, sizeof(cpumap));
+    unlock_pages(&cpumap, sizeof(cpumap));
 
  out:
     return ret;
@@ -134,7 +134,7 @@ int xc_vcpu_getaffinity(int xc_handle,
                          (uint8_t *)cpumap);
     domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(*cpumap) * 8;
     
-    if ( mlock(cpumap, sizeof(*cpumap)) != 0 )
+    if ( lock_pages(cpumap, sizeof(*cpumap)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -142,7 +142,7 @@ int xc_vcpu_getaffinity(int xc_handle,
 
     ret = do_domctl(xc_handle, &domctl);
 
-    safe_munlock(cpumap, sizeof(*cpumap));
+    unlock_pages(cpumap, sizeof(*cpumap));
 
  out:
     return ret;
@@ -213,7 +213,7 @@ int xc_domain_getinfolist(int xc_handle,
     int ret = 0;
     DECLARE_SYSCTL;
 
-    if ( mlock(info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
+    if ( lock_pages(info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
         return -1;
 
     sysctl.cmd = XEN_SYSCTL_getdomaininfolist;
@@ -226,8 +226,7 @@ int xc_domain_getinfolist(int xc_handle,
     else
         ret = sysctl.u.getdomaininfolist.num_domains;
 
-    if ( munlock(info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
-        ret = -1;
+    unlock_pages(info, max_domains*sizeof(xc_domaininfo_t));
 
     return ret;
 }
@@ -245,12 +244,12 @@ int xc_vcpu_getcontext(int xc_handle,
     domctl.u.vcpucontext.vcpu   = (uint16_t)vcpu;
     set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
 
-    if ( (rc = mlock(ctxt, sizeof(*ctxt))) != 0 )
+    if ( (rc = lock_pages(ctxt, sizeof(*ctxt))) != 0 )
         return rc;
 
     rc = do_domctl(xc_handle, &domctl);
 
-    safe_munlock(ctxt, sizeof(*ctxt));
+    unlock_pages(ctxt, sizeof(*ctxt));
 
     return rc;
 }
@@ -512,12 +511,12 @@ int xc_vcpu_setcontext(int xc_handle,
     domctl.u.vcpucontext.vcpu = vcpu;
     set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
 
-    if ( (rc = mlock(ctxt, sizeof(*ctxt))) != 0 )
+    if ( (rc = lock_pages(ctxt, sizeof(*ctxt))) != 0 )
         return rc;
 
     rc = do_domctl(xc_handle, &domctl);
 
-    safe_munlock(ctxt, sizeof(*ctxt));
+    unlock_pages(ctxt, sizeof(*ctxt));
 
     return rc;
 
diff --git a/tools/libxc/xc_evtchn.c b/tools/libxc/xc_evtchn.c
--- a/tools/libxc/xc_evtchn.c
+++ b/tools/libxc/xc_evtchn.c
@@ -18,16 +18,16 @@ static int do_evtchn_op(int xc_handle, i
     hypercall.arg[0] = cmd;
     hypercall.arg[1] = (unsigned long)arg;
 
-    if ( mlock(arg, arg_size) != 0 )
+    if ( lock_pages(arg, arg_size) != 0 )
     {
-        PERROR("do_evtchn_op: arg mlock failed");
+        PERROR("do_evtchn_op: arg lock failed");
         goto out;
     }
 
     if ((ret = do_xen_hypercall(xc_handle, &hypercall)) < 0)
         ERROR("do_evtchn_op: HYPERVISOR_event_channel_op failed: %d", ret);
 
-    safe_munlock(arg, arg_size);
+    unlock_pages(arg, arg_size);
  out:
     return ret;
 }
diff --git a/tools/libxc/xc_hvm_build.c b/tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c
+++ b/tools/libxc/xc_hvm_build.c
@@ -38,13 +38,13 @@ static void xc_set_hvm_param(int handle,
     arg.domid = dom;
     arg.index = param;
     arg.value = value;
-    if ( mlock(&arg, sizeof(arg)) != 0 )
+    if ( lock_pages(&arg, sizeof(arg)) != 0 )
     {
         PERROR("Could not lock memory for set parameter");
         return;
     }
     rc = do_xen_hypercall(handle, &hypercall);
-    safe_munlock(&arg, sizeof(arg));
+    unlock_pages(&arg, sizeof(arg));
     if (rc < 0)
         PERROR("set HVM parameter failed (%d)", rc);
 }
@@ -403,7 +403,7 @@ static int xc_hvm_build_internal(int xc_
         goto error_out;
     }
 
-    if ( mlock(&st_ctxt, sizeof(st_ctxt) ) )
+    if ( lock_pages(&st_ctxt, sizeof(st_ctxt) ) )
     {
-        PERROR("%s: ctxt mlock failed", __func__);
+        PERROR("%s: ctxt lock failed", __func__);
         return 1;
diff --git a/tools/libxc/xc_linux_build.c b/tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c
+++ b/tools/libxc/xc_linux_build.c
@@ -1140,9 +1140,9 @@ static int xc_linux_build_internal(int x
     memset(&st_ctxt, 0, sizeof(st_ctxt));
 #endif
 
-    if ( mlock(&st_ctxt, sizeof(st_ctxt) ) )
-    {
-        PERROR("%s: ctxt mlock failed", __func__);
+    if ( lock_pages(&st_ctxt, sizeof(st_ctxt) ) )
+    {
+        PERROR("%s: ctxt lock failed", __func__);
         return 1;
     }
 
diff --git a/tools/libxc/xc_linux_restore.c b/tools/libxc/xc_linux_restore.c
--- a/tools/libxc/xc_linux_restore.c
+++ b/tools/libxc/xc_linux_restore.c
@@ -162,9 +162,9 @@ int xc_linux_restore(int xc_handle, int 
         return 1;
     }
 
-    if (mlock(&ctxt, sizeof(ctxt))) {
+    if (lock_pages(&ctxt, sizeof(ctxt))) {
         /* needed for build domctl, but might as well do early */
-        ERROR("Unable to mlock ctxt");
+        ERROR("Unable to lock ctxt");
         return 1;
     }
 
@@ -251,8 +251,8 @@ int xc_linux_restore(int xc_handle, int 
         goto out;
     }
 
-    if (mlock(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE)) {
-        ERROR("Could not mlock region_mfn");
+    if (lock_pages(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE)) {
+        ERROR("Could not lock region_mfn");
         goto out;
     }
 
diff --git a/tools/libxc/xc_linux_save.c b/tools/libxc/xc_linux_save.c
--- a/tools/libxc/xc_linux_save.c
+++ b/tools/libxc/xc_linux_save.c
@@ -628,8 +628,8 @@ int xc_linux_save(int xc_handle, int io_
         return 1;
     }
 
-    if (mlock(&ctxt, sizeof(ctxt))) {
-        ERROR("Unable to mlock ctxt");
+    if (lock_pages(&ctxt, sizeof(ctxt))) {
+        ERROR("Unable to lock ctxt");
         return 1;
     }
 
@@ -767,14 +767,14 @@ int xc_linux_save(int xc_handle, int io_
 
     memset(to_send, 0xff, BITMAP_SIZE);
 
-    if (mlock(to_send, BITMAP_SIZE)) {
-        ERROR("Unable to mlock to_send");
+    if (lock_pages(to_send, BITMAP_SIZE)) {
+        ERROR("Unable to lock to_send");
         return 1;
     }
 
     /* (to fix is local only) */
-    if (mlock(to_skip, BITMAP_SIZE)) {
-        ERROR("Unable to mlock to_skip");
+    if (lock_pages(to_skip, BITMAP_SIZE)) {
+        ERROR("Unable to lock to_skip");
         return 1;
     }
 
@@ -790,8 +790,8 @@ int xc_linux_save(int xc_handle, int io_
         goto out;
     }
 
-    if (mlock(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type))) {
-        ERROR("Unable to mlock");
+    if (lock_pages(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type))) {
+        ERROR("Unable to lock");
         goto out;
     }
 
diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -21,13 +21,13 @@ int xc_readconsolering(int xc_handle,
     sysctl.u.readconsole.count  = nr_chars;
     sysctl.u.readconsole.clear  = clear;
 
-    if ( (ret = mlock(buffer, nr_chars)) != 0 )
+    if ( (ret = lock_pages(buffer, nr_chars)) != 0 )
         return ret;
 
     if ( (ret = do_sysctl(xc_handle, &sysctl)) == 0 )
         *pnr_chars = sysctl.u.readconsole.count;
 
-    safe_munlock(buffer, nr_chars);
+    unlock_pages(buffer, nr_chars);
 
     return ret;
 }
diff --git a/tools/libxc/xc_private.c b/tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c
+++ b/tools/libxc/xc_private.c
@@ -7,7 +7,23 @@
 #include <inttypes.h>
 #include "xc_private.h"
 
-/* NB: arr must be mlock'ed */
+int lock_pages(void *addr, size_t len)
+{
+    int e = 0;
+#ifndef __sun__
+    e = mlock(addr, len);
+#endif
+    return (e);
+}
+
+void unlock_pages(void *addr, size_t len)
+{
+#ifndef __sun__
+    safe_munlock(addr, len);
+#endif
+}
+
+/* NB: arr must be locked */
 int xc_get_pfn_type_batch(int xc_handle,
                           uint32_t dom, int num, unsigned long *arr)
 {
@@ -51,7 +67,7 @@ int xc_mmuext_op(
     hypercall.arg[2] = (unsigned long)0;
     hypercall.arg[3] = (unsigned long)dom;
 
-    if ( mlock(op, nr_ops*sizeof(*op)) != 0 )
+    if ( lock_pages(op, nr_ops*sizeof(*op)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out1;
@@ -59,7 +75,7 @@ int xc_mmuext_op(
 
     ret = do_xen_hypercall(xc_handle, &hypercall);
 
-    safe_munlock(op, nr_ops*sizeof(*op));
+    unlock_pages(op, nr_ops*sizeof(*op));
 
  out1:
     return ret;
@@ -79,9 +95,9 @@ static int flush_mmu_updates(int xc_hand
     hypercall.arg[2] = 0;
     hypercall.arg[3] = mmu->subject;
 
-    if ( mlock(mmu->updates, sizeof(mmu->updates)) != 0 )
-    {
-        PERROR("flush_mmu_updates: mmu updates mlock failed");
+    if ( lock_pages(mmu->updates, sizeof(mmu->updates)) != 0 )
+    {
+        PERROR("flush_mmu_updates: mmu updates lock_pages failed");
         err = 1;
         goto out;
     }
@@ -94,7 +110,7 @@ static int flush_mmu_updates(int xc_hand
 
     mmu->idx = 0;
 
-    safe_munlock(mmu->updates, sizeof(mmu->updates));
+    unlock_pages(mmu->updates, sizeof(mmu->updates));
 
  out:
     return err;
@@ -149,62 +165,62 @@ int xc_memory_op(int xc_handle,
     case XENMEM_increase_reservation:
     case XENMEM_decrease_reservation:
     case XENMEM_populate_physmap:
-        if ( mlock(reservation, sizeof(*reservation)) != 0 )
-        {
-            PERROR("Could not mlock");
+        if ( lock_pages(reservation, sizeof(*reservation)) != 0 )
+        {
+            PERROR("Could not lock");
             goto out1;
         }
         get_xen_guest_handle(extent_start, reservation->extent_start);
         if ( (extent_start != NULL) &&
-             (mlock(extent_start,
+             (lock_pages(extent_start,
                     reservation->nr_extents * sizeof(xen_pfn_t)) != 0) )
         {
-            PERROR("Could not mlock");
-            safe_munlock(reservation, sizeof(*reservation));
+            PERROR("Could not lock");
+            unlock_pages(reservation, sizeof(*reservation));
             goto out1;
         }
         break;
     case XENMEM_machphys_mfn_list:
-        if ( mlock(xmml, sizeof(*xmml)) != 0 )
-        {
-            PERROR("Could not mlock");
+        if ( lock_pages(xmml, sizeof(*xmml)) != 0 )
+        {
+            PERROR("Could not lock");
             goto out1;
         }
         get_xen_guest_handle(extent_start, xmml->extent_start);
-        if ( mlock(extent_start,
+        if ( lock_pages(extent_start,
                    xmml->max_extents * sizeof(xen_pfn_t)) != 0 )
         {
-            PERROR("Could not mlock");
-            safe_munlock(xmml, sizeof(*xmml));
+            PERROR("Could not lock");
+            unlock_pages(xmml, sizeof(*xmml));
             goto out1;
         }
         break;
     case XENMEM_add_to_physmap:
-        if ( mlock(arg, sizeof(struct xen_add_to_physmap)) )
-        {
-            PERROR("Could not mlock");
+        if ( lock_pages(arg, sizeof(struct xen_add_to_physmap)) )
+        {
+            PERROR("Could not lock");
             goto out1;
         }
         break;
     case XENMEM_translate_gpfn_list:
-        if ( mlock(trans, sizeof(*trans)) != 0 )
-        {
-            PERROR("Could not mlock");
+        if ( lock_pages(trans, sizeof(*trans)) != 0 )
+        {
+            PERROR("Could not lock");
             goto out1;
         }
         get_xen_guest_handle(gpfn_list, trans->gpfn_list);
-        if ( mlock(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)) != 0 )
-        {
-            PERROR("Could not mlock");
-            safe_munlock(trans, sizeof(*trans));
+        if ( lock_pages(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)) != 0 )
+        {
+            PERROR("Could not lock");
+            unlock_pages(trans, sizeof(*trans));
             goto out1;
         }
         get_xen_guest_handle(mfn_list, trans->mfn_list);
-        if ( mlock(mfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)) != 0 )
-        {
-            PERROR("Could not mlock");
-            safe_munlock(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t));
-            safe_munlock(trans, sizeof(*trans));
+        if ( lock_pages(mfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)) != 0 )
+        {
+            PERROR("Could not lock");
+            unlock_pages(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t));
+            unlock_pages(trans, sizeof(*trans));
             goto out1;
         }
         break;
@@ -217,27 +233,27 @@ int xc_memory_op(int xc_handle,
     case XENMEM_increase_reservation:
     case XENMEM_decrease_reservation:
     case XENMEM_populate_physmap:
-        safe_munlock(reservation, sizeof(*reservation));
+        unlock_pages(reservation, sizeof(*reservation));
         get_xen_guest_handle(extent_start, reservation->extent_start);
         if ( extent_start != NULL )
-            safe_munlock(extent_start,
+            unlock_pages(extent_start,
                          reservation->nr_extents * sizeof(xen_pfn_t));
         break;
     case XENMEM_machphys_mfn_list:
-        safe_munlock(xmml, sizeof(*xmml));
+        unlock_pages(xmml, sizeof(*xmml));
         get_xen_guest_handle(extent_start, xmml->extent_start);
-        safe_munlock(extent_start,
+        unlock_pages(extent_start,
                      xmml->max_extents * sizeof(xen_pfn_t));
         break;
     case XENMEM_add_to_physmap:
-        safe_munlock(arg, sizeof(struct xen_add_to_physmap));
+        unlock_pages(arg, sizeof(struct xen_add_to_physmap));
         break;
     case XENMEM_translate_gpfn_list:
             get_xen_guest_handle(mfn_list, trans->mfn_list);
-            safe_munlock(mfn_list, trans->nr_gpfns * sizeof(xen_pfn_t));
+            unlock_pages(mfn_list, trans->nr_gpfns * sizeof(xen_pfn_t));
             get_xen_guest_handle(gpfn_list, trans->gpfn_list);
-            safe_munlock(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t));
-            safe_munlock(trans, sizeof(*trans));
+            unlock_pages(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t));
+            unlock_pages(trans, sizeof(*trans));
         break;
     }
 
@@ -279,15 +295,15 @@ int xc_get_pfn_list(int xc_handle,
     memset(pfn_buf, 0, max_pfns * sizeof(xen_pfn_t));
 #endif
 
-    if ( mlock(pfn_buf, max_pfns * sizeof(xen_pfn_t)) != 0 )
-    {
-        PERROR("xc_get_pfn_list: pfn_buf mlock failed");
+    if ( lock_pages(pfn_buf, max_pfns * sizeof(xen_pfn_t)) != 0 )
+    {
+        PERROR("xc_get_pfn_list: pfn_buf lock failed");
         return -1;
     }
 
     ret = do_domctl(xc_handle, &domctl);
 
-    safe_munlock(pfn_buf, max_pfns * sizeof(xen_pfn_t));
+    unlock_pages(pfn_buf, max_pfns * sizeof(xen_pfn_t));
 
 #if 0
 #ifdef DEBUG
@@ -419,7 +435,7 @@ int xc_version(int xc_handle, int cmd, v
         break;
     }
 
-    if ( (argsize != 0) && (mlock(arg, argsize) != 0) )
+    if ( (argsize != 0) && (lock_pages(arg, argsize) != 0) )
     {
         PERROR("Could not lock memory for version hypercall");
         return -ENOMEM;
@@ -433,7 +449,7 @@ int xc_version(int xc_handle, int cmd, v
     rc = do_xen_version(xc_handle, cmd, arg);
 
     if ( argsize != 0 )
-        safe_munlock(arg, argsize);
+        unlock_pages(arg, argsize);
 
     return rc;
 }
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -66,6 +66,9 @@ do {                                    
     errno = __saved_errno;                      \
 } while (0)
 
+int lock_pages(void *addr, size_t len);
+void unlock_pages(void *addr, size_t len);
+
 #define PERROR(_m, _a...)                               \
 do {                                                    \
     int __saved_errno = errno;                          \
@@ -104,7 +107,7 @@ static inline int do_domctl(int xc_handl
     hypercall.op     = __HYPERVISOR_domctl;
     hypercall.arg[0] = (unsigned long)domctl;
 
-    if ( mlock(domctl, sizeof(*domctl)) != 0 )
+    if ( lock_pages(domctl, sizeof(*domctl)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out1;
@@ -117,7 +120,7 @@ static inline int do_domctl(int xc_handl
                     " rebuild the user-space tool set?\n");
     }
 
-    safe_munlock(domctl, sizeof(*domctl));
+    unlock_pages(domctl, sizeof(*domctl));
 
  out1:
     return ret;
@@ -133,7 +136,7 @@ static inline int do_sysctl(int xc_handl
     hypercall.op     = __HYPERVISOR_sysctl;
     hypercall.arg[0] = (unsigned long)sysctl;
 
-    if ( mlock(sysctl, sizeof(*sysctl)) != 0 )
+    if ( lock_pages(sysctl, sizeof(*sysctl)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out1;
@@ -146,7 +149,7 @@ static inline int do_sysctl(int xc_handl
                     " rebuild the user-space tool set?\n");
     }
 
-    safe_munlock(sysctl, sizeof(*sysctl));
+    unlock_pages(sysctl, sizeof(*sysctl));
 
  out1:
     return ret;
diff --git a/tools/libxc/xc_tbuf.c b/tools/libxc/xc_tbuf.c
--- a/tools/libxc/xc_tbuf.c
+++ b/tools/libxc/xc_tbuf.c
@@ -104,7 +104,7 @@ int xc_tbuf_set_cpu_mask(int xc_handle, 
     set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, (uint8_t *)&mask);
     sysctl.u.tbuf_op.cpu_mask.nr_cpus = sizeof(mask) * 8;
 
-    if ( mlock(&mask, sizeof(mask)) != 0 )
+    if ( lock_pages(&mask, sizeof(mask)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -112,7 +112,7 @@ int xc_tbuf_set_cpu_mask(int xc_handle, 
 
     ret = do_sysctl(xc_handle, &sysctl);
 
-    safe_munlock(&mask, sizeof(mask));
+    unlock_pages(&mask, sizeof(mask));
 
  out:
     return ret;
diff --git a/tools/libxc/xg_private.c b/tools/libxc/xg_private.c
--- a/tools/libxc/xg_private.c
+++ b/tools/libxc/xg_private.c
@@ -10,6 +10,22 @@
 #include <strings.h>
 
 #include "xg_private.h"
+
+int lock_pages(void *addr, size_t len)
+{
+    int e = 0;
+#ifndef __sun__
+    e = mlock(addr, len);
+#endif
+    return (e);
+}
+
+void unlock_pages(void *addr, size_t len)
+{
+#ifndef __sun__
+    safe_munlock(addr, len);
+#endif
+}
 
 char *xc_read_image(const char *filename, unsigned long *size)
 {
diff --git a/tools/misc/xenperf.c b/tools/misc/xenperf.c
--- a/tools/misc/xenperf.c
+++ b/tools/misc/xenperf.c
@@ -17,6 +17,22 @@
 #include <sys/mman.h>
 #include <errno.h>
 #include <string.h>
+
+int lock_pages(void *addr, size_t len)
+{
+    int e = 0;
+#ifndef __sun__
+    e = mlock(addr, len);
+#endif
+    return (e);
+}
+
+void unlock_pages(void *addr, size_t len)
+{
+#ifndef __sun__
+    munlock(addr, len);
+#endif
+}
 
 int main(int argc, char *argv[])
 {
@@ -87,11 +103,11 @@ int main(int argc, char *argv[])
        pcv = malloc(sizeof(*pcv) * num_val);
 
     if ( pcd == NULL
-                || mlock(pcd, sizeof(*pcd) * num_desc) != 0
+                || lock_pages(pcd, sizeof(*pcd) * num_desc) != 0
                 || pcv == NULL
-                || mlock(pcd, sizeof(*pcv) * num_val) != 0)
+                || lock_pages(pcv, sizeof(*pcv) * num_val) != 0)
     {
-        fprintf(stderr, "Could not alloc or mlock buffers: %d (%s)\n",
+        fprintf(stderr, "Could not alloc or lock buffers: %d (%s)\n",
                 errno, strerror(errno));
         exit(-1);
     }
@@ -104,8 +120,8 @@ int main(int argc, char *argv[])
         return 1;
     }
 
-    munlock(pcd, sizeof(*pcd) * num_desc);
-    munlock(pcv, sizeof(*pcv) * num_val);
+    unlock_pages(pcd, sizeof(*pcd) * num_desc);
+    unlock_pages(pcv, sizeof(*pcv) * num_val);
 
        val = pcv;
     for ( i = 0; i < num_desc; i++ )

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel