[Xen-changelog] [xen-unstable] x86: Allow bitop functions to be applied only to fields of at least 4 bytes

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86: Allow bitop functions to be applied only to fields of at least 4
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 20 Mar 2008 05:40:08 -0700
Delivery-date: Thu, 20 Mar 2008 05:40:18 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1205676694 0
# Node ID af33f2054f47b3cdc9de37e567b11986ca22a7f1
# Parent  f33328217eee1a66bf2a874ff1a42b62c21e42bc
x86: Allow bitop functions to be applied only to fields of at least 4
bytes. Otherwise the 'longword' processor instructions used will
overlap adjacent fields, with unpredictable consequences.

This change requires some code fixup and just a few casts (mainly when
operating on guest-shared fields, whose layout cannot be changed and
which are by observation clearly safe).

Based on ideas from Jan Beulich <jbeulich@xxxxxxxxxx>

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
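
The size check added to asm-x86/bitops.h turns a too-small operand into a
build failure by referencing a function that is never defined, so the link
fails unless the compiler can prove the offending call dead. A minimal
standalone sketch of the idiom (set_bit_checked is a hypothetical name, and
a plain C statement stands in for the real inline asm; like Xen itself, this
relies on building with optimisation so the dead call is discarded):

    #include <stdio.h>

    /* Never defined anywhere: any surviving reference fails at link time. */
    extern void __bitop_bad_size(void);

    /* The real macro also checks __alignof__; sizeof alone is shown here. */
    #define bitop_bad_size(addr) (sizeof(*(addr)) < 4)

    #define set_bit_checked(nr, addr) ({                    \
        if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
        *(addr) |= 1UL << (nr);  /* stand-in for "btsl" */  \
    })

    int main(void)
    {
        unsigned long word = 0;
        set_bit_checked(3, &word);      /* fine: operand is >= 4 bytes */
        printf("word = %#lx\n", word);  /* prints: word = 0x8 */
        /* char c = 0; set_bit_checked(0, &c);  <- would fail to link */
        return 0;
    }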
---
 xen/arch/x86/domain.c               |    4 
 xen/arch/x86/hvm/hvm.c              |    4 
 xen/arch/x86/hvm/svm/vmcb.c         |   14 -
 xen/arch/x86/hvm/vlapic.c           |   26 +-
 xen/arch/x86/hvm/vmx/vmcs.c         |   12 -
 xen/arch/x86/hvm/vmx/vpmu_core2.c   |   16 -
 xen/arch/x86/irq.c                  |    9 
 xen/arch/x86/mm/paging.c            |   11 -
 xen/arch/x86/mm/shadow/private.h    |    2 
 xen/common/domain.c                 |    2 
 xen/common/event_channel.c          |   14 -
 xen/common/keyhandler.c             |    6 
 xen/common/schedule.c               |    2 
 xen/drivers/passthrough/vtd/iommu.c |    7 
 xen/drivers/video/vesa.c            |    2 
 xen/include/asm-x86/bitops.h        |  361 ++++++++++++++++++++----------------
 xen/include/asm-x86/event.h         |    5 
 xen/include/asm-x86/grant_table.h   |    2 
 xen/include/asm-x86/hvm/support.h   |    2 
 xen/include/asm-x86/hvm/svm/vmcb.h  |    2 
 xen/include/asm-x86/hvm/vmx/vmcs.h  |    2 
 xen/include/asm-x86/shared.h        |    8 
 xen/include/xen/shared.h            |   36 +--
 23 files changed, 305 insertions(+), 244 deletions(-)
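
A pattern that recurs throughout the diff below: bitmap base pointers change
from char * to unsigned long *, so constant byte offsets such as 0x800 are
rescaled by BYTES_PER_LONG, because C pointer arithmetic advances in units of
the pointee size. A standalone illustration of why the rescaled expression
addresses the same byte (assuming, as in Xen, BYTES_PER_LONG is
sizeof(unsigned long)):

    #include <assert.h>
    #include <stdio.h>

    #define BYTES_PER_LONG sizeof(unsigned long)

    int main(void)
    {
        unsigned long bitmap[0x1000 / BYTES_PER_LONG];
        char *as_bytes = (char *)bitmap;

        /* Advancing 0x800/BYTES_PER_LONG longs covers exactly 0x800
         * bytes, so the bit layout in memory is unchanged. */
        assert((char *)(bitmap + 0x800 / BYTES_PER_LONG) == as_bytes + 0x800);
        puts("offsets agree");
        return 0;
    }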

diff -r f33328217eee -r af33f2054f47 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/arch/x86/domain.c     Sun Mar 16 14:11:34 2008 +0000
@@ -830,7 +830,7 @@ unmap_vcpu_info(struct vcpu *v)
     mfn = v->arch.vcpu_info_mfn;
     unmap_domain_page_global(v->vcpu_info);
 
-    v->vcpu_info = shared_info_addr(d, vcpu_info[v->vcpu_id]);
+    v->vcpu_info = (void *)&shared_info(d, vcpu_info[v->vcpu_id]);
     v->arch.vcpu_info_mfn = INVALID_MFN;
 
     put_page_and_type(mfn_to_page(mfn));
@@ -888,7 +888,7 @@ map_vcpu_info(struct vcpu *v, unsigned l
      */
     vcpu_info(v, evtchn_upcall_pending) = 1;
     for ( i = 0; i < BITS_PER_GUEST_LONG(d); i++ )
-        set_bit(i, vcpu_info_addr(v, evtchn_pending_sel));
+        set_bit(i, &vcpu_info(v, evtchn_pending_sel));
 
     /*
      * Only bother to update time for the current vcpu.  If we're
diff -r f33328217eee -r af33f2054f47 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Sun Mar 16 14:11:34 2008 +0000
@@ -59,8 +59,8 @@ struct hvm_function_table hvm_funcs __re
 struct hvm_function_table hvm_funcs __read_mostly;
 
 /* I/O permission bitmap is globally shared by all HVM guests. */
-char __attribute__ ((__section__ (".bss.page_aligned")))
-    hvm_io_bitmap[3*PAGE_SIZE];
+unsigned long __attribute__ ((__section__ (".bss.page_aligned")))
+    hvm_io_bitmap[3*PAGE_SIZE/BYTES_PER_LONG];
 
 void hvm_enable(struct hvm_function_table *fns)
 {
diff -r f33328217eee -r af33f2054f47 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Sun Mar 16 14:11:34 2008 +0000
@@ -80,27 +80,27 @@ struct host_save_area *alloc_host_save_a
 
 void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr)
 {
-    char *msr_bitmap = v->arch.hvm_svm.msrpm;
+    unsigned long *msr_bitmap = v->arch.hvm_svm.msrpm;
 
     /*
      * See AMD64 Programmers Manual, Vol 2, Section 15.10 (MSR-Bitmap Address).
      */
     if ( msr <= 0x1fff )
     {
-        __clear_bit(msr*2, msr_bitmap + 0x000); 
-        __clear_bit(msr*2+1, msr_bitmap + 0x000); 
+        __clear_bit(msr*2, msr_bitmap + 0x000/BYTES_PER_LONG); 
+        __clear_bit(msr*2+1, msr_bitmap + 0x000/BYTES_PER_LONG); 
     }
     else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
     {
         msr &= 0x1fff;
-        __clear_bit(msr*2, msr_bitmap + 0x800);
-        __clear_bit(msr*2+1, msr_bitmap + 0x800);
+        __clear_bit(msr*2, msr_bitmap + 0x800/BYTES_PER_LONG);
+        __clear_bit(msr*2+1, msr_bitmap + 0x800/BYTES_PER_LONG);
     } 
     else if ( (msr >= 0xc001000) && (msr <= 0xc0011fff) )
     {
         msr &= 0x1fff;
-        __clear_bit(msr*2, msr_bitmap + 0x1000);
-        __clear_bit(msr*2+1, msr_bitmap + 0x1000);
+        __clear_bit(msr*2, msr_bitmap + 0x1000/BYTES_PER_LONG);
+        __clear_bit(msr*2+1, msr_bitmap + 0x1000/BYTES_PER_LONG);
     }
 }
 
diff -r f33328217eee -r af33f2054f47 xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/arch/x86/hvm/vlapic.c Sun Mar 16 14:11:34 2008 +0000
@@ -83,15 +83,17 @@ static unsigned int vlapic_lvt_mask[VLAP
  */
 
 #define VEC_POS(v) ((v)%32)
-#define REG_POS(v) (((v)/32)* 0x10)
-#define vlapic_test_and_set_vector(vec, bitmap)                 \
-    test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
-#define vlapic_test_and_clear_vector(vec, bitmap)               \
-    test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
-#define vlapic_set_vector(vec, bitmap)                          \
-    set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
-#define vlapic_clear_vector(vec, bitmap)                        \
-    clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
+#define REG_POS(v) (((v)/32) * 0x10)
+#define vlapic_test_and_set_vector(vec, bitmap)                         \
+    test_and_set_bit(VEC_POS(vec),                                      \
+                     (unsigned long *)((bitmap) + REG_POS(vec)))
+#define vlapic_test_and_clear_vector(vec, bitmap)                       \
+    test_and_clear_bit(VEC_POS(vec),                                    \
+                       (unsigned long *)((bitmap) + REG_POS(vec)))
+#define vlapic_set_vector(vec, bitmap)                                  \
+    set_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
+#define vlapic_clear_vector(vec, bitmap)                                \
+    clear_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
 
 static int vlapic_find_highest_vector(void *bitmap)
 {
@@ -112,12 +114,14 @@ static int vlapic_find_highest_vector(vo
 
 static int vlapic_test_and_set_irr(int vector, struct vlapic *vlapic)
 {
-    return vlapic_test_and_set_vector(vector, &vlapic->regs->data[APIC_IRR]);
+    return vlapic_test_and_set_vector(
+        vector, (unsigned long *)&vlapic->regs->data[APIC_IRR]);
 }
 
 static void vlapic_clear_irr(int vector, struct vlapic *vlapic)
 {
-    vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]);
+    vlapic_clear_vector(
+        vector, (unsigned long *)&vlapic->regs->data[APIC_IRR]);
 }
 
 static int vlapic_find_highest_irr(struct vlapic *vlapic)
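
The vlapic casts above are safe because the virtual APIC page stores each
256-bit vector bitmap (IRR, ISR, TMR) as eight 32-bit registers on a 16-byte
(0x10) stride, so every bit operation lands inside a 4-byte, 4-byte-aligned
register. A small standalone check of the index math, reusing the macros'
definitions:

    #include <assert.h>

    #define VEC_POS(v) ((v) % 32)          /* bit within a register   */
    #define REG_POS(v) (((v) / 32) * 0x10) /* register's byte offset  */

    int main(void)
    {
        /* Vector 0x31 (49): bit 17 of the register at offset 0x10. */
        assert(VEC_POS(0x31) == 17);
        assert(REG_POS(0x31) == 0x10);
        return 0;
    }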
diff -r f33328217eee -r af33f2054f47 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Sun Mar 16 14:11:34 2008 +0000
@@ -413,7 +413,7 @@ static void vmx_set_host_env(struct vcpu
 
 void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr)
 {
-    char *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
+    unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
 
     /* VMX MSR bitmap supported? */
     if ( msr_bitmap == NULL )
@@ -426,14 +426,14 @@ void vmx_disable_intercept_for_msr(struc
      */
     if ( msr <= 0x1fff )
     {
-        __clear_bit(msr, msr_bitmap + 0x000); /* read-low */
-        __clear_bit(msr, msr_bitmap + 0x800); /* write-low */
+        __clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */
+        __clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */
     }
     else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
     {
         msr &= 0x1fff;
-        __clear_bit(msr, msr_bitmap + 0x400); /* read-high */
-        __clear_bit(msr, msr_bitmap + 0xc00); /* write-high */
+        __clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */
+        __clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */
     }
 }
 
@@ -456,7 +456,7 @@ static int construct_vmcs(struct vcpu *v
     /* MSR access bitmap. */
     if ( cpu_has_vmx_msr_bitmap )
     {
-        char *msr_bitmap = alloc_xenheap_page();
+        unsigned long *msr_bitmap = alloc_xenheap_page();
 
         if ( msr_bitmap == NULL )
             return -ENOMEM;
diff -r f33328217eee -r af33f2054f47 xen/arch/x86/hvm/vmx/vpmu_core2.c
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Sun Mar 16 14:11:34 2008 +0000
@@ -101,7 +101,7 @@ static int is_core2_vpmu_msr(u32 msr_ind
     return 0;
 }
 
-static void core2_vpmu_set_msr_bitmap(char *msr_bitmap)
+static void core2_vpmu_set_msr_bitmap(unsigned long *msr_bitmap)
 {
     int i;
 
@@ -109,12 +109,14 @@ static void core2_vpmu_set_msr_bitmap(ch
     for ( i = 0; i < core2_counters.num; i++ )
     {
         clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap);
-        clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap+0x800);
+        clear_bit(msraddr_to_bitpos(core2_counters.msr[i]),
+                  msr_bitmap + 0x800/BYTES_PER_LONG);
     }
     for ( i = 0; i < core2_get_pmc_count(); i++ )
     {
         clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
-        clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap+0x800);
+        clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i),
+                  msr_bitmap + 0x800/BYTES_PER_LONG);
     }
 
     /* Allow Read PMU Non-global Controls Directly. */
@@ -124,19 +126,21 @@ static void core2_vpmu_set_msr_bitmap(ch
         clear_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
 }
 
-static void core2_vpmu_unset_msr_bitmap(char *msr_bitmap)
+static void core2_vpmu_unset_msr_bitmap(unsigned long *msr_bitmap)
 {
     int i;
 
     for ( i = 0; i < core2_counters.num; i++ )
     {
         set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap);
-        set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap+0x800);
+        set_bit(msraddr_to_bitpos(core2_counters.msr[i]),
+                msr_bitmap + 0x800/BYTES_PER_LONG);
     }
     for ( i = 0; i < core2_get_pmc_count(); i++ )
     {
         set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
-        set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap+0x800);
+        set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i),
+                msr_bitmap + 0x800/BYTES_PER_LONG);
     }
     for ( i = 0; i < core2_ctrls.num; i++ )
         set_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
diff -r f33328217eee -r af33f2054f47 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/arch/x86/irq.c        Sun Mar 16 14:11:34 2008 +0000
@@ -362,13 +362,12 @@ int pirq_guest_unmask(struct domain *d)
 int pirq_guest_unmask(struct domain *d)
 {
     unsigned int   irq;
-    shared_info_t *s = d->shared_info;
 
     for ( irq = find_first_bit(d->pirq_mask, NR_IRQS);
           irq < NR_IRQS;
           irq = find_next_bit(d->pirq_mask, NR_IRQS, irq+1) )
     {
-        if ( !test_bit(d->pirq_to_evtchn[irq], __shared_info_addr(d, s, evtchn_mask)) )
+        if ( !test_bit(d->pirq_to_evtchn[irq], &shared_info(d, evtchn_mask)) )
             __pirq_guest_eoi(d, irq);
     }
 
@@ -660,13 +659,13 @@ static void dump_irqs(unsigned char key)
                 printk("%u(%c%c%c%c)",
                        d->domain_id,
                        (test_bit(d->pirq_to_evtchn[irq],
-                                 shared_info_addr(d, evtchn_pending)) ?
+                                 &shared_info(d, evtchn_pending)) ?
                         'P' : '-'),
                        (test_bit(d->pirq_to_evtchn[irq]/BITS_PER_GUEST_LONG(d),
-                                 vcpu_info_addr(d->vcpu[0], evtchn_pending_sel)) ?
+                                 &vcpu_info(d->vcpu[0], evtchn_pending_sel)) ?
                         'S' : '-'),
                        (test_bit(d->pirq_to_evtchn[irq],
-                                 shared_info_addr(d, evtchn_mask)) ?
+                                 &shared_info(d, evtchn_mask)) ?
                         'M' : '-'),
                        (test_bit(irq, d->pirq_mask) ?
                         'M' : '-'));
diff -r f33328217eee -r af33f2054f47 xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c  Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/arch/x86/mm/paging.c  Sun Mar 16 14:11:34 2008 +0000
@@ -114,7 +114,8 @@ static mfn_t paging_new_log_dirty_page(s
     return mfn;
 }
 
-static mfn_t paging_new_log_dirty_leaf(struct domain *d, uint8_t **leaf_p)
+static mfn_t paging_new_log_dirty_leaf(
+    struct domain *d, unsigned long **leaf_p)
 {
     mfn_t mfn = paging_new_log_dirty_page(d, (void **)leaf_p);
     if ( mfn_valid(mfn) )
@@ -264,7 +265,7 @@ void paging_mark_dirty(struct domain *d,
     mfn_t gmfn;
     int changed;
     mfn_t mfn, *l4, *l3, *l2;
-    uint8_t *l1;
+    unsigned long *l1;
     int i1, i2, i3, i4;
 
     gmfn = _mfn(guest_mfn);
@@ -341,7 +342,7 @@ int paging_log_dirty_op(struct domain *d
     int rv = 0, clean = 0, peek = 1;
     unsigned long pages = 0;
     mfn_t *l4, *l3, *l2;
-    uint8_t *l1;
+    unsigned long *l1;
     int i4, i3, i2;
 
     domain_pause(d);
@@ -399,7 +400,7 @@ int paging_log_dirty_op(struct domain *d
                   (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
                   i2++ )
             {
-                static uint8_t zeroes[PAGE_SIZE];
+                static unsigned long zeroes[PAGE_SIZE/BYTES_PER_LONG];
                 unsigned int bytes = PAGE_SIZE;
                 l1 = ((l2 && mfn_valid(l2[i2])) ?
                       map_domain_page(mfn_x(l2[i2])) : zeroes);
@@ -408,7 +409,7 @@ int paging_log_dirty_op(struct domain *d
                 if ( likely(peek) )
                 {
                     if ( copy_to_guest_offset(sc->dirty_bitmap, pages >> 3,
-                                              l1, bytes) != 0 )
+                                              (uint8_t *)l1, bytes) != 0 )
                     {
                         rv = -EFAULT;
                         goto out;
diff -r f33328217eee -r af33f2054f47 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/arch/x86/mm/shadow/private.h  Sun Mar 16 14:11:34 2008 +0000
@@ -483,7 +483,7 @@ sh_mfn_is_dirty(struct domain *d, mfn_t 
 {
     unsigned long pfn;
     mfn_t mfn, *l4, *l3, *l2;
-    uint8_t *l1;
+    unsigned long *l1;
     int rv;
 
     ASSERT(shadow_mode_log_dirty(d));
diff -r f33328217eee -r af33f2054f47 xen/common/domain.c
--- a/xen/common/domain.c       Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/common/domain.c       Sun Mar 16 14:11:34 2008 +0000
@@ -154,7 +154,7 @@ struct vcpu *alloc_vcpu(
     if ( !is_idle_domain(d) )
     {
         set_bit(_VPF_down, &v->pause_flags);
-        v->vcpu_info = shared_info_addr(d, vcpu_info[vcpu_id]);
+        v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
     }
 
     if ( sched_init_vcpu(v, cpu_id) != 0 )
diff -r f33328217eee -r af33f2054f47 xen/common/event_channel.c
--- a/xen/common/event_channel.c        Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/common/event_channel.c        Sun Mar 16 14:11:34 2008 +0000
@@ -539,7 +539,6 @@ void evtchn_set_pending(struct vcpu *v, 
 void evtchn_set_pending(struct vcpu *v, int port)
 {
     struct domain *d = v->domain;
-    shared_info_t *s = d->shared_info;
 
     /*
      * The following bit operations must happen in strict order.
@@ -548,12 +547,12 @@ void evtchn_set_pending(struct vcpu *v, 
      * others may require explicit memory barriers.
      */
 
-    if ( test_and_set_bit(port, __shared_info_addr(d, s, evtchn_pending)) )
+    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
         return;
 
-    if ( !test_bit        (port, __shared_info_addr(d, s, evtchn_mask)) &&
+    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
          !test_and_set_bit(port / BITS_PER_GUEST_LONG(d),
-                           vcpu_info_addr(v, evtchn_pending_sel)) )
+                           &vcpu_info(v, evtchn_pending_sel)) )
     {
         vcpu_mark_events_pending(v);
     }
@@ -750,7 +749,6 @@ static long evtchn_unmask(evtchn_unmask_
 static long evtchn_unmask(evtchn_unmask_t *unmask)
 {
     struct domain *d = current->domain;
-    shared_info_t *s = d->shared_info;
     int            port = unmask->port;
     struct vcpu   *v;
 
@@ -768,10 +766,10 @@ static long evtchn_unmask(evtchn_unmask_
      * These operations must happen in strict order. Based on
      * include/xen/event.h:evtchn_set_pending(). 
      */
-    if ( test_and_clear_bit(port, __shared_info_addr(d, s, evtchn_mask)) &&
-         test_bit          (port, __shared_info_addr(d, s, evtchn_pending)) &&
+    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
+         test_bit          (port, &shared_info(d, evtchn_pending)) &&
          !test_and_set_bit (port / BITS_PER_GUEST_LONG(d),
-                            vcpu_info_addr(v, evtchn_pending_sel)) )
+                            &vcpu_info(v, evtchn_pending_sel)) )
     {
         vcpu_mark_events_pending(v);
     }
diff -r f33328217eee -r af33f2054f47 xen/common/keyhandler.c
--- a/xen/common/keyhandler.c   Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/common/keyhandler.c   Sun Mar 16 14:11:34 2008 +0000
@@ -201,12 +201,12 @@ static void dump_domains(unsigned char k
             printk("    Notifying guest (virq %d, port %d, stat %d/%d/%d)\n",
                    VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG],
                    test_bit(v->virq_to_evtchn[VIRQ_DEBUG], 
-                            shared_info_addr(d, evtchn_pending)),
+                            &shared_info(d, evtchn_pending)),
                    test_bit(v->virq_to_evtchn[VIRQ_DEBUG], 
-                            shared_info_addr(d, evtchn_mask)),
+                            &shared_info(d, evtchn_mask)),
                    test_bit(v->virq_to_evtchn[VIRQ_DEBUG] /
                             BITS_PER_GUEST_LONG(d),
-                            vcpu_info_addr(v, evtchn_pending_sel)));
+                            &vcpu_info(v, evtchn_pending_sel)));
             send_guest_vcpu_virq(v, VIRQ_DEBUG);
         }
     }
diff -r f33328217eee -r af33f2054f47 xen/common/schedule.c
--- a/xen/common/schedule.c     Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/common/schedule.c     Sun Mar 16 14:11:34 2008 +0000
@@ -365,7 +365,7 @@ static long do_poll(struct sched_poll *s
             goto out;
 
         rc = 0;
-        if ( test_bit(port, shared_info_addr(d, evtchn_pending)) )
+        if ( test_bit(port, &shared_info(d, evtchn_pending)) )
             goto out;
     }
 
diff -r f33328217eee -r af33f2054f47 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/drivers/passthrough/vtd/iommu.c       Sun Mar 16 14:11:34 2008 +0000
@@ -39,8 +39,8 @@
 #define domain_iommu_domid(d) ((d)->arch.hvm_domain.hvm_iommu.iommu_domid)
 
 static spinlock_t domid_bitmap_lock;    /* protect domain id bitmap */
-static int domid_bitmap_size;           /* domain id bitmap size in bit */
-static void *domid_bitmap;              /* iommu domain id bitmap */
+static int domid_bitmap_size;           /* domain id bitmap size in bits */
+static unsigned long *domid_bitmap;     /* iommu domain id bitmap */
 
 #define DID_FIELD_WIDTH 16
 #define DID_HIGH_OFFSET 8
@@ -1885,7 +1885,8 @@ int iommu_setup(void)
 
     /* Allocate domain id bitmap, and set bit 0 as reserved */
     domid_bitmap_size = cap_ndoms(iommu->cap);
-    domid_bitmap = xmalloc_bytes(domid_bitmap_size / 8);
+    domid_bitmap = xmalloc_array(unsigned long,
+                                 BITS_TO_LONGS(domid_bitmap_size));
     if ( domid_bitmap == NULL )
         goto error;
     memset(domid_bitmap, 0, domid_bitmap_size / 8);
diff -r f33328217eee -r af33f2054f47 xen/drivers/video/vesa.c
--- a/xen/drivers/video/vesa.c  Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/drivers/video/vesa.c  Sun Mar 16 14:11:34 2008 +0000
@@ -219,7 +219,7 @@ static void vesa_show_line(
                      ((font->width + 7) >> 3));
             for ( b = font->width; b--; )
             {
-                pixel = test_bit(b, bits) ? pixel_on : 0;
+                pixel = (*bits & (1u<<b)) ? pixel_on : 0;
                 memcpy(ptr, &pixel, bpp);
                 ptr += bpp;
             }
diff -r f33328217eee -r af33f2054f47 xen/include/asm-x86/bitops.h
--- a/xen/include/asm-x86/bitops.h      Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/include/asm-x86/bitops.h      Sun Mar 16 14:11:34 2008 +0000
@@ -25,6 +25,9 @@
 #define ADDR (*(volatile long *) addr)
 #define CONST_ADDR (*(const volatile long *) addr)
 
+extern void __bitop_bad_size(void);
+#define bitop_bad_size(addr) (min(sizeof(*(addr)), __alignof__(*(addr))) < 4)
+
 /**
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -35,13 +38,18 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static __inline__ void set_bit(int nr, volatile void * addr)
-{
-       __asm__ __volatile__( LOCK_PREFIX
-               "btsl %1,%0"
-               :"=m" (ADDR)
-               :"dIr" (nr), "m" (ADDR) : "memory");
-}
+static inline void set_bit(int nr, volatile void *addr)
+{
+    asm volatile (
+        LOCK_PREFIX
+        "btsl %1,%0"
+        : "=m" (ADDR)
+        : "Ir" (nr), "m" (ADDR) : "memory");
+}
+#define set_bit(nr, addr) ({                            \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    set_bit(nr, addr);                                  \
+})
 
 /**
  * __set_bit - Set a bit in memory
@@ -52,13 +60,17 @@ static __inline__ void set_bit(int nr, v
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void __set_bit(int nr, volatile void * addr)
-{
-       __asm__(
-               "btsl %1,%0"
-               :"=m" (ADDR)
-               :"dIr" (nr), "m" (ADDR) : "memory");
-}
+static inline void __set_bit(int nr, volatile void *addr)
+{
+    asm volatile (
+        "btsl %1,%0"
+        : "=m" (ADDR)
+        : "Ir" (nr), "m" (ADDR) : "memory");
+}
+#define __set_bit(nr, addr) ({                          \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    __set_bit(nr, addr);                                \
+})
 
 /**
  * clear_bit - Clears a bit in memory
@@ -70,13 +82,18 @@ static __inline__ void __set_bit(int nr,
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static __inline__ void clear_bit(int nr, volatile void * addr)
-{
-       __asm__ __volatile__( LOCK_PREFIX
-               "btrl %1,%0"
-               :"=m" (ADDR)
-               :"dIr" (nr), "m" (ADDR) : "memory");
-}
+static inline void clear_bit(int nr, volatile void *addr)
+{
+    asm volatile (
+        LOCK_PREFIX
+        "btrl %1,%0"
+        : "=m" (ADDR)
+        : "Ir" (nr), "m" (ADDR) : "memory");
+}
+#define clear_bit(nr, addr) ({                          \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    clear_bit(nr, addr);                                \
+})
 
 /**
  * __clear_bit - Clears a bit in memory
@@ -87,16 +104,20 @@ static __inline__ void clear_bit(int nr,
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void __clear_bit(int nr, volatile void * addr)
-{
-       __asm__(
-               "btrl %1,%0"
-               :"=m" (ADDR)
-               :"dIr" (nr), "m" (ADDR) : "memory");
-}
-
-#define smp_mb__before_clear_bit()     barrier()
-#define smp_mb__after_clear_bit()      barrier()
+static inline void __clear_bit(int nr, volatile void *addr)
+{
+    asm volatile (
+        "btrl %1,%0"
+        : "=m" (ADDR)
+        : "Ir" (nr), "m" (ADDR) : "memory");
+}
+#define __clear_bit(nr, addr) ({                        \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    __clear_bit(nr, addr);                              \
+})
+
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit()  barrier()
 
 /**
  * __change_bit - Toggle a bit in memory
@@ -107,13 +128,17 @@ static __inline__ void __clear_bit(int n
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void __change_bit(int nr, volatile void * addr)
-{
-       __asm__ __volatile__(
-               "btcl %1,%0"
-               :"=m" (ADDR)
-               :"dIr" (nr), "m" (ADDR) : "memory");
-}
+static inline void __change_bit(int nr, volatile void *addr)
+{
+    asm volatile (
+        "btcl %1,%0"
+        : "=m" (ADDR)
+        : "Ir" (nr), "m" (ADDR) : "memory");
+}
+#define __change_bit(nr, addr) ({                       \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    __change_bit(nr, addr);                             \
+})
 
 /**
  * change_bit - Toggle a bit in memory
@@ -124,13 +149,18 @@ static __inline__ void __change_bit(int 
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static __inline__ void change_bit(int nr, volatile void * addr)
-{
-       __asm__ __volatile__( LOCK_PREFIX
-               "btcl %1,%0"
-               :"=m" (ADDR)
-               :"dIr" (nr), "m" (ADDR) : "memory");
-}
+static inline void change_bit(int nr, volatile void *addr)
+{
+    asm volatile (
+        LOCK_PREFIX
+        "btcl %1,%0"
+        : "=m" (ADDR)
+        : "Ir" (nr), "m" (ADDR) : "memory");
+}
+#define change_bit(nr, addr) ({                         \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    change_bit(nr, addr);                               \
+})
 
 /**
  * test_and_set_bit - Set a bit and return its old value
@@ -140,16 +170,21 @@ static __inline__ void change_bit(int nr
  * This operation is atomic and cannot be reordered.  
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
-{
-       int oldbit;
-
-       __asm__ __volatile__( LOCK_PREFIX
-               "btsl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit),"=m" (ADDR)
-               :"dIr" (nr), "m" (ADDR) : "memory");
-       return oldbit;
-}
+static inline int test_and_set_bit(int nr, volatile void *addr)
+{
+    int oldbit;
+
+    asm volatile (
+        LOCK_PREFIX
+        "btsl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR)
+        : "Ir" (nr), "m" (ADDR) : "memory");
+    return oldbit;
+}
+#define test_and_set_bit(nr, addr) ({                   \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    test_and_set_bit(nr, addr);                         \
+})
 
 /**
  * __test_and_set_bit - Set a bit and return its old value
@@ -160,16 +195,20 @@ static __inline__ int test_and_set_bit(i
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
-{
-       int oldbit;
-
-       __asm__ __volatile__(
-               "btsl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit),"=m" (ADDR)
-               :"dIr" (nr), "m" (ADDR) : "memory");
-       return oldbit;
-}
+static inline int __test_and_set_bit(int nr, volatile void *addr)
+{
+    int oldbit;
+
+    asm volatile (
+        "btsl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR)
+        : "Ir" (nr), "m" (ADDR) : "memory");
+    return oldbit;
+}
+#define __test_and_set_bit(nr, addr) ({                 \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    __test_and_set_bit(nr, addr);                       \
+})
 
 /**
  * test_and_clear_bit - Clear a bit and return its old value
@@ -179,16 +218,21 @@ static __inline__ int __test_and_set_bit
  * This operation is atomic and cannot be reordered.  
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
-{
-       int oldbit;
-
-       __asm__ __volatile__( LOCK_PREFIX
-               "btrl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit),"=m" (ADDR)
-               :"dIr" (nr), "m" (ADDR) : "memory");
-       return oldbit;
-}
+static inline int test_and_clear_bit(int nr, volatile void *addr)
+{
+    int oldbit;
+
+    asm volatile (
+        LOCK_PREFIX
+        "btrl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR)
+        : "Ir" (nr), "m" (ADDR) : "memory");
+    return oldbit;
+}
+#define test_and_clear_bit(nr, addr) ({                 \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    test_and_clear_bit(nr, addr);                       \
+})
 
 /**
  * __test_and_clear_bit - Clear a bit and return its old value
@@ -199,28 +243,36 @@ static __inline__ int test_and_clear_bit
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
-{
-       int oldbit;
-
-       __asm__ __volatile__(
-               "btrl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit),"=m" (ADDR)
-               :"dIr" (nr), "m" (ADDR) : "memory");
-       return oldbit;
-}
+static inline int __test_and_clear_bit(int nr, volatile void *addr)
+{
+    int oldbit;
+
+    asm volatile (
+        "btrl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR)
+        : "Ir" (nr), "m" (ADDR) : "memory");
+    return oldbit;
+}
+#define __test_and_clear_bit(nr, addr) ({               \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    __test_and_clear_bit(nr, addr);                     \
+})
 
 /* WARNING: non atomic and it can be reordered! */
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
-{
-       int oldbit;
-
-       __asm__ __volatile__(
-               "btcl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit),"=m" (ADDR)
-               :"dIr" (nr), "m" (ADDR) : "memory");
-       return oldbit;
-}
+static inline int __test_and_change_bit(int nr, volatile void *addr)
+{
+    int oldbit;
+
+    asm volatile (
+        "btcl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR)
+        : "Ir" (nr), "m" (ADDR) : "memory");
+    return oldbit;
+}
+#define __test_and_change_bit(nr, addr) ({              \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    __test_and_change_bit(nr, addr);                    \
+})
 
 /**
  * test_and_change_bit - Change a bit and return its new value
@@ -230,38 +282,45 @@ static __inline__ int __test_and_change_
  * This operation is atomic and cannot be reordered.  
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
-{
-       int oldbit;
-
-       __asm__ __volatile__( LOCK_PREFIX
-               "btcl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit),"=m" (ADDR)
-               :"dIr" (nr), "m" (ADDR) : "memory");
-       return oldbit;
-}
-
-
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
-{
-       return ((1U << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int variable_test_bit(int nr, const volatile void * addr)
-{
-       int oldbit;
-
-       __asm__ __volatile__(
-               "btl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit)
-               :"m" (CONST_ADDR),"dIr" (nr));
-       return oldbit;
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
+static inline int test_and_change_bit(int nr, volatile void *addr)
+{
+    int oldbit;
+
+    asm volatile (
+        LOCK_PREFIX
+        "btcl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR)
+        : "Ir" (nr), "m" (ADDR) : "memory");
+    return oldbit;
+}
+#define test_and_change_bit(nr, addr) ({                \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    test_and_change_bit(nr, addr);                      \
+})
+
+static inline int constant_test_bit(int nr, const volatile void *addr)
+{
+    return ((1U << (nr & 31)) &
+            (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
+}
+
+static inline int variable_test_bit(int nr, const volatile void *addr)
+{
+    int oldbit;
+
+    asm volatile (
+        "btl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit)
+        : "m" (CONST_ADDR), "Ir" (nr) : "memory" );
+    return oldbit;
+}
+
+#define test_bit(nr, addr) ({                           \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    (__builtin_constant_p(nr) ?                         \
+     constant_test_bit((nr),(addr)) :                   \
+     variable_test_bit((nr),(addr)));                   \
+})
 
 extern unsigned int __find_first_bit(
     const unsigned long *addr, unsigned int size);
@@ -275,8 +334,8 @@ extern unsigned int __find_next_zero_bit
 /* return index of first bit set in val or BITS_PER_LONG when no bit is set */
 static inline unsigned int __scanbit(unsigned long val)
 {
-       __asm__ ( "bsf %1,%0" : "=r" (val) : "r" (val), "0" (BITS_PER_LONG) );
-       return (unsigned int)val;
+    asm ( "bsf %1,%0" : "=r" (val) : "r" (val), "0" (BITS_PER_LONG) );
+    return (unsigned int)val;
 }
 
 /**
@@ -335,10 +394,10 @@ static inline unsigned int __scanbit(uns
  * Returns the bit-number of the first set bit. If no bits are set then the
  * result is undefined.
  */
-static __inline__ unsigned int find_first_set_bit(unsigned long word)
-{
-       __asm__ ( "bsf %1,%0" : "=r" (word) : "r" (word) );
-       return (unsigned int)word;
+static inline unsigned int find_first_set_bit(unsigned long word)
+{
+    asm ( "bsf %1,%0" : "=r" (word) : "r" (word) );
+    return (unsigned int)word;
 }
 
 /**
@@ -349,10 +408,10 @@ static __inline__ unsigned int find_firs
  */
 static inline unsigned long ffz(unsigned long word)
 {
-       __asm__("bsf %1,%0"
-               :"=r" (word)
-               :"r" (~word));
-       return word;
+    asm ( "bsf %1,%0"
+          :"=r" (word)
+          :"r" (~word));
+    return word;
 }
 
 /**
@@ -365,13 +424,13 @@ static inline unsigned long ffz(unsigned
  */
 static inline int ffs(unsigned long x)
 {
-       long r;
-
-       __asm__("bsf %1,%0\n\t"
-               "jnz 1f\n\t"
-               "mov $-1,%0\n"
-               "1:" : "=r" (r) : "rm" (x));
-       return (int)r+1;
+    long r;
+
+    asm ( "bsf %1,%0\n\t"
+          "jnz 1f\n\t"
+          "mov $-1,%0\n"
+          "1:" : "=r" (r) : "rm" (x));
+    return (int)r+1;
 }
 
 /**
@@ -382,13 +441,13 @@ static inline int ffs(unsigned long x)
  */
 static inline int fls(unsigned long x)
 {
-       long r;
-
-       __asm__("bsr %1,%0\n\t"
-               "jnz 1f\n\t"
-               "mov $-1,%0\n"
-               "1:" : "=r" (r) : "rm" (x));
-       return (int)r+1;
+    long r;
+
+    asm ( "bsr %1,%0\n\t"
+          "jnz 1f\n\t"
+          "mov $-1,%0\n"
+          "1:" : "=r" (r) : "rm" (x));
+    return (int)r+1;
 }
 
 /**
diff -r f33328217eee -r af33f2054f47 xen/include/asm-x86/event.h
--- a/xen/include/asm-x86/event.h       Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/include/asm-x86/event.h       Sun Mar 16 14:11:34 2008 +0000
@@ -30,7 +30,10 @@ static inline void vcpu_kick(struct vcpu
 
 static inline void vcpu_mark_events_pending(struct vcpu *v)
 {
-    if ( test_and_set_bit(0, &vcpu_info(v, evtchn_upcall_pending)) )
+    int already_pending = test_and_set_bit(
+        0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
+
+    if ( already_pending )
         return;
 
     if ( is_hvm_vcpu(v) )
diff -r f33328217eee -r af33f2054f47 xen/include/asm-x86/grant_table.h
--- a/xen/include/asm-x86/grant_table.h Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/include/asm-x86/grant_table.h Sun Mar 16 14:11:34 2008 +0000
@@ -35,7 +35,7 @@ int replace_grant_host_mapping(
 
 static inline void gnttab_clear_flag(unsigned long nr, uint16_t *addr)
 {
-    clear_bit(nr, addr);
+    clear_bit(nr, (unsigned long *)addr);
 }
 
 /* Foreign mappings of HHVM-guest pages do not modify the type count. */
diff -r f33328217eee -r af33f2054f47 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/include/asm-x86/hvm/support.h Sun Mar 16 14:11:34 2008 +0000
@@ -78,7 +78,7 @@ extern unsigned int opt_hvm_debug_level;
 #define HVM_DBG_LOG(level, _f, _a...)
 #endif
 
-extern char hvm_io_bitmap[];
+extern unsigned long hvm_io_bitmap[];
 
 void hvm_enable(struct hvm_function_table *);
 
diff -r f33328217eee -r af33f2054f47 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Sun Mar 16 14:11:34 2008 +0000
@@ -448,7 +448,7 @@ struct arch_svm_struct {
     struct vmcb_struct *vmcb;
     u64    vmcb_pa;
     u64    asid_generation; /* ASID tracking, moved here for cache locality. */
-    char  *msrpm;
+    unsigned long *msrpm;
     int    launch_core;
     bool_t vmcb_in_sync;    /* VMCB sync'ed with VMSAVE? */
 };
diff -r f33328217eee -r af33f2054f47 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Sun Mar 16 14:11:34 2008 +0000
@@ -81,7 +81,7 @@ struct arch_vmx_struct {
     unsigned long        cstar;
 #endif
 
-    char                *msr_bitmap;
+    unsigned long       *msr_bitmap;
     unsigned int         msr_count;
     struct vmx_msr_entry *msr_area;
     unsigned int         host_msr_count;
diff -r f33328217eee -r af33f2054f47 xen/include/asm-x86/shared.h
--- a/xen/include/asm-x86/shared.h      Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/include/asm-x86/shared.h      Sun Mar 16 14:11:34 2008 +0000
@@ -3,9 +3,9 @@
 
 #ifdef CONFIG_COMPAT
 
-#define nmi_reason(d) (!has_32bit_shinfo(d) ?                              \
-                       (void *)&(d)->shared_info->native.arch.nmi_reason : \
-                       (void *)&(d)->shared_info->compat.arch.nmi_reason)
+#define nmi_reason(d) (!has_32bit_shinfo(d) ?                             \
+                       (u32 *)&(d)->shared_info->native.arch.nmi_reason : \
+                       (u32 *)&(d)->shared_info->compat.arch.nmi_reason)
 
 #define GET_SET_SHARED(type, field)                             \
 static inline type arch_get_##field(const struct domain *d)     \
@@ -41,7 +41,7 @@ static inline void arch_set_##field(stru
 
 #else
 
-#define nmi_reason(d) ((void *)&(d)->shared_info->arch.nmi_reason)
+#define nmi_reason(d) ((u32 *)&(d)->shared_info->arch.nmi_reason)
 
 #define GET_SET_SHARED(type, field)                             \
 static inline type arch_get_##field(const struct domain *d)     \
diff -r f33328217eee -r af33f2054f47 xen/include/xen/shared.h
--- a/xen/include/xen/shared.h  Mon Mar 10 22:51:57 2008 +0000
+++ b/xen/include/xen/shared.h  Sun Mar 16 14:11:34 2008 +0000
@@ -12,44 +12,36 @@ typedef union {
     struct compat_shared_info compat;
 } shared_info_t;
 
-#define __shared_info(d, s, field)      (*(!has_32bit_shinfo(d) ?       \
-                                           &(s)->native.field :         \
-                                           &(s)->compat.field))
-#define __shared_info_addr(d, s, field) (!has_32bit_shinfo(d) ?         \
-                                         (void *)&(s)->native.field :   \
-                                         (void *)&(s)->compat.field)
-
+/*
+ * Compat field is never larger than native field, so cast to that as it
+ * is the largest memory range it is safe for the caller to modify without
+ * further discrimination between compat and native cases.
+ */
+#define __shared_info(d, s, field)                      \
+    (*(!has_32bit_shinfo(d) ?                           \
+       (typeof(&(s)->compat.field))&(s)->native.field : \
+       (typeof(&(s)->compat.field))&(s)->compat.field))
 #define shared_info(d, field)                   \
     __shared_info(d, (d)->shared_info, field)
-#define shared_info_addr(d, field)                      \
-    __shared_info_addr(d, (d)->shared_info, field)
 
 typedef union {
     struct vcpu_info native;
     struct compat_vcpu_info compat;
 } vcpu_info_t;
 
-#define vcpu_info(v, field)      (*(!has_32bit_shinfo((v)->domain) ?    \
-                                    &(v)->vcpu_info->native.field :     \
-                                    &(v)->vcpu_info->compat.field))
-#define vcpu_info_addr(v, field) (!has_32bit_shinfo((v)->domain) ?        \
-                                  (void *)&(v)->vcpu_info->native.field : \
-                                  (void *)&(v)->vcpu_info->compat.field)
+/* As above, cast to compat field type. */
+#define vcpu_info(v, field)                                                   \
+    (*(!has_32bit_shinfo((v)->domain) ?                                       \
+       (typeof(&(v)->vcpu_info->compat.field))&(v)->vcpu_info->native.field : \
+       (typeof(&(v)->vcpu_info->compat.field))&(v)->vcpu_info->compat.field))
 
 #else
 
 typedef struct shared_info shared_info_t;
-
-#define __shared_info(d, s, field)      ((s)->field)
-#define __shared_info_addr(d, s, field) ((void *)&(s)->field)
-
 #define shared_info(d, field)           ((d)->shared_info->field)
-#define shared_info_addr(d, field)      ((void *)&(d)->shared_info->field)
 
 typedef struct vcpu_info vcpu_info_t;
-
 #define vcpu_info(v, field)             ((v)->vcpu_info->field)
-#define vcpu_info_addr(v, field)        ((void *)&(v)->vcpu_info->field)
 
 #endif
 

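The xen/shared.h rework above replaces the void-pointer *_addr accessors with
a typeof cast to the compat field's type, so both arms of the ternary yield
an lvalue no wider than the smaller layout. A simplified, self-contained
sketch of the idiom (stand-in struct layouts and a hypothetical shared_field
macro; typeof is the GCC extension Xen already uses):

    #include <stdio.h>

    struct native_info { unsigned long pending; }; /* 8 bytes on x86-64 */
    struct compat_info { unsigned int  pending; }; /* 4 bytes */

    typedef union {
        struct native_info native;
        struct compat_info compat;
    } shared_info_t;

    /* Both arms are cast to the compat field's type: writes through the
     * result can never exceed the smaller field's extent. */
    #define shared_field(is_compat, s, field)                  \
        (*((is_compat) ?                                       \
           (typeof(&(s)->compat.field))&(s)->compat.field :    \
           (typeof(&(s)->compat.field))&(s)->native.field))

    int main(void)
    {
        shared_info_t s = { .native = { 0 } };

        shared_field(0, &s, pending) = 1;   /* a 4-byte write only */
        printf("pending = %u\n", shared_field(0, &s, pending));
        return 0;
    }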