xen-changelog

[Xen-changelog] [xen-unstable] Merge with xen-ia64-unstable

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Merge with xen-ia64-unstable
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sat, 16 Jun 2007 05:57:32 -0700
Delivery-date: Sat, 16 Jun 2007 05:56:01 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1181986926 -3600
# Node ID 093bc9dcbbcac00194861072b4ddcaa9a27a604a
# Parent  3b51eebdf9ab83e4473913358d4924370fa75763
# Parent  1feb91894e11d3b1fac3b4c38b1eb325c8acd20b
Merge with xen-ia64-unstable
---
 xen/arch/x86/hvm/hpet.c        |   43 +++++++++++++-
 xen/arch/x86/hvm/hvm.c         |    1 
 xen/arch/x86/hvm/i8254.c       |   87 ++++++++++++++++++++++-------
 xen/arch/x86/hvm/pmtimer.c     |   62 ++++++++++++++------
 xen/arch/x86/hvm/rtc.c         |   64 ++++++++++++++++++---
 xen/arch/x86/hvm/svm/vmcb.c    |   10 +++
 xen/arch/x86/hvm/vpt.c         |   85 +++++++++++++++++++++++++---
 xen/arch/x86/mm/hap/hap.c      |  123 ++++++++++++++---------------------------
 xen/arch/x86/mm/p2m.c          |    8 ++
 xen/common/kexec.c             |    3 +
 xen/common/timer.c             |    2 
 xen/include/asm-x86/hvm/vcpu.h |    3 +
 xen/include/asm-x86/hvm/vpt.h  |    6 +-
 13 files changed, 349 insertions(+), 148 deletions(-)
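
The bulk of the x86 side of this merge serializes the HVM platform-timer
emulation (HPET, PIT, RTC, ACPI PM timer) behind per-device spinlocks, plus a
new per-vcpu tm_lock protecting the periodic-timer list. The recurring shape,
visible in every file below, is that public entry points (port/MMIO handlers,
save/load, timer callbacks) take the device lock while internal helpers merely
ASSERT that it is held. A minimal user-space sketch of that shape, with
pthread spinlocks standing in for Xen's spin_lock/spin_is_locked and a
hypothetical device struct:

    /* Entry points lock; helpers assert.  Illustrative names, not Xen's. */
    #include <assert.h>
    #include <pthread.h>
    #include <stdint.h>

    typedef struct {
        pthread_spinlock_t lock;
        int locked;                   /* poor man's spin_is_locked() */
        uint64_t counter;
    } timer_dev_t;

    static void dev_lock(timer_dev_t *d)
    {
        pthread_spin_lock(&d->lock);
        d->locked = 1;
    }

    static void dev_unlock(timer_dev_t *d)
    {
        d->locked = 0;
        pthread_spin_unlock(&d->lock);
    }

    /* Helper: only legal with the lock held, like hpet_read_maincounter(). */
    static uint64_t dev_read_counter(timer_dev_t *d)
    {
        assert(d->locked);
        return d->counter;
    }

    /* Entry point: takes the lock itself, like hpet_read()/hpet_write(). */
    static uint64_t dev_mmio_read(timer_dev_t *d)
    {
        dev_lock(d);
        uint64_t val = dev_read_counter(d);
        dev_unlock(d);
        return val;
    }

    int main(void)
    {
        timer_dev_t d = { .counter = 42 };
        pthread_spin_init(&d.lock, PTHREAD_PROCESS_PRIVATE);
        return dev_mmio_read(&d) == 42 ? 0 : 1;
    }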

diff -r 3b51eebdf9ab -r 093bc9dcbbca xen/arch/x86/hvm/hpet.c
--- a/xen/arch/x86/hvm/hpet.c   Fri Jun 15 13:33:11 2007 -0600
+++ b/xen/arch/x86/hvm/hpet.c   Sat Jun 16 10:42:06 2007 +0100
@@ -113,6 +113,8 @@ static inline int hpet_check_access_leng
 
 static inline uint64_t hpet_read_maincounter(HPETState *h)
 {
+    ASSERT(spin_is_locked(&h->lock));
+
     if ( hpet_enabled(h) )
         return guest_time_hpet(h->vcpu) + h->mc_offset;
     else 
@@ -131,6 +133,8 @@ static unsigned long hpet_read(
     if ( hpet_check_access_length(addr, length) != 0 )
         return ~0UL;
 
+    spin_lock(&h->lock);
+
     val = hpet_read64(h, addr & ~7);
     if ( (addr & ~7) == HPET_COUNTER )
         val = hpet_read_maincounter(h);
@@ -139,12 +143,15 @@ static unsigned long hpet_read(
     if ( length != 8 )
         result = (val >> ((addr & 7) * 8)) & ((1UL << (length * 8)) - 1);
 
+    spin_unlock(&h->lock);
+
     return result;
 }
 
 static void hpet_stop_timer(HPETState *h, unsigned int tn)
 {
     ASSERT(tn < HPET_TIMER_NUM);
+    ASSERT(spin_is_locked(&h->lock));
     stop_timer(&h->timers[tn]);
 }
 
@@ -157,7 +164,8 @@ static void hpet_set_timer(HPETState *h,
     uint64_t tn_cmp, cur_tick, diff;
 
     ASSERT(tn < HPET_TIMER_NUM);
-    
+    ASSERT(spin_is_locked(&h->lock));
+
     if ( !hpet_enabled(h) || !timer_enabled(h, tn) )
         return;
 
@@ -212,6 +220,8 @@ static void hpet_write(
 
     if ( hpet_check_access_length(addr, length) != 0 )
         return;
+
+    spin_lock(&h->lock);
 
     old_val = hpet_read64(h, addr & ~7);
     if ( (addr & ~7) == HPET_COUNTER )
@@ -302,6 +312,8 @@ static void hpet_write(
         /* Ignore writes to unsupported and reserved registers. */
         break;
     }
+
+    spin_unlock(&h->lock);
 }
 
 static int hpet_range(struct vcpu *v, unsigned long addr)
@@ -320,6 +332,8 @@ static void hpet_route_interrupt(HPETSta
 {
     unsigned int tn_int_route = timer_int_route(h, tn);
     struct domain *d = h->vcpu->domain;
+
+    ASSERT(spin_is_locked(&h->lock));
 
     if ( (tn <= 1) && (h->hpet.config & HPET_CFG_LEGACY) )
     {
@@ -352,8 +366,13 @@ static void hpet_timer_fn(void *opaque)
     HPETState *h = htfi->hs;
     unsigned int tn = htfi->tn;
 
+    spin_lock(&h->lock);
+
     if ( !hpet_enabled(h) || !timer_enabled(h, tn) )
-        return;
+    {
+        spin_unlock(&h->lock);
+        return;
+    }
 
     hpet_route_interrupt(h, tn);
 
@@ -374,6 +393,8 @@ static void hpet_timer_fn(void *opaque)
         set_timer(&h->timers[tn], 
                   NOW() + hpet_tick_to_ns(h, h->hpet.period[tn]));
     }
+
+    spin_unlock(&h->lock);
 }
 
 void hpet_migrate_timers(struct vcpu *v)
@@ -391,12 +412,19 @@ static int hpet_save(struct domain *d, h
 static int hpet_save(struct domain *d, hvm_domain_context_t *h)
 {
     HPETState *hp = &d->arch.hvm_domain.pl_time.vhpet;
+    int rc;
+
+    spin_lock(&hp->lock);
 
     /* Write the proper value into the main counter */
     hp->hpet.mc64 = hp->mc_offset + guest_time_hpet(hp->vcpu);
 
     /* Save the HPET registers */
-    return hvm_save_entry(HPET, 0, h, &hp->hpet);
+    rc = hvm_save_entry(HPET, 0, h, &hp->hpet);
+
+    spin_unlock(&hp->lock);
+
+    return rc;
 }
 
 static int hpet_load(struct domain *d, hvm_domain_context_t *h)
@@ -404,9 +432,14 @@ static int hpet_load(struct domain *d, h
     HPETState *hp = &d->arch.hvm_domain.pl_time.vhpet;
     int i;
 
+    spin_lock(&hp->lock);
+
     /* Reload the HPET registers */
     if ( hvm_load_entry(HPET, h, &hp->hpet) )
+    {
+        spin_unlock(&hp->lock);
         return -EINVAL;
+    }
     
     /* Recalculate the offset between the main counter and guest time */
     hp->mc_offset = hp->hpet.mc64 - guest_time_hpet(hp->vcpu);
@@ -415,6 +448,8 @@ static int hpet_load(struct domain *d, h
     for ( i = 0; i < HPET_TIMER_NUM; i++ )
         hpet_set_timer(hp, i);
 
+    spin_unlock(&hp->lock);
+
     return 0;
 }
 
@@ -426,6 +461,8 @@ void hpet_init(struct vcpu *v)
     int i;
 
     memset(h, 0, sizeof(HPETState));
+
+    spin_lock_init(&h->lock);
 
     h->vcpu = v;
     h->tsc_freq = ticks_per_sec(v);
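
One detail worth noting in hpet_save()/hpet_load() above: the virtual main
counter stays consistent across save/restore because the offset is folded
into the saved mc64 and then recomputed against the restored host's idea of
guest time. A self-contained sketch of that arithmetic (illustrative names; a
simple monotonic clock is assumed, and unsigned wraparound makes the
subtraction safe even when the new time base is larger):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mc_offset;             /* counter = guest_time + mc_offset */

    static uint64_t save_mc64(uint64_t guest_time)
    {
        return mc_offset + guest_time;     /* as in hpet_save() */
    }

    static void load_mc64(uint64_t mc64, uint64_t guest_time)
    {
        mc_offset = mc64 - guest_time;     /* as in hpet_load() */
    }

    int main(void)
    {
        mc_offset = 100;
        uint64_t saved = save_mc64(1000);  /* counter read 1100 at save */
        load_mc64(saved, 5000);            /* restore on a new time base */
        printf("%llu\n", (unsigned long long)(5000 + mc_offset));  /* 1100 */
        return 0;
    }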
diff -r 3b51eebdf9ab -r 093bc9dcbbca xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Jun 15 13:33:11 2007 -0600
+++ b/xen/arch/x86/hvm/hvm.c    Sat Jun 16 10:42:06 2007 +0100
@@ -401,6 +401,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
         get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
     spin_unlock(&v->domain->arch.hvm_domain.ioreq.lock);
 
+    spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
     INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
 
     if ( v->vcpu_id == 0 )
diff -r 3b51eebdf9ab -r 093bc9dcbbca xen/arch/x86/hvm/i8254.c
--- a/xen/arch/x86/hvm/i8254.c  Fri Jun 15 13:33:11 2007 -0600
+++ b/xen/arch/x86/hvm/i8254.c  Sat Jun 16 10:42:06 2007 +0100
@@ -82,6 +82,8 @@ static int pit_get_count(PITState *pit, 
     struct hvm_hw_pit_channel *c = &pit->hw.channels[channel];
     struct vcpu *v = vpit_vcpu(pit);
 
+    ASSERT(spin_is_locked(&pit->lock));
+
     d = muldiv64(hvm_get_guest_time(v) - pit->count_load_time[channel],
                  PIT_FREQ, ticks_per_sec(v));
 
@@ -111,6 +113,8 @@ static int pit_get_out(PITState *pit, in
     int out;
     struct vcpu *v = vpit_vcpu(pit);
 
+    ASSERT(spin_is_locked(&pit->lock));
+
     d = muldiv64(hvm_get_guest_time(v) - pit->count_load_time[channel], 
                  PIT_FREQ, ticks_per_sec(v));
 
@@ -142,6 +146,8 @@ static void pit_set_gate(PITState *pit, 
 {
     struct hvm_hw_pit_channel *s = &pit->hw.channels[channel];
     struct vcpu *v = vpit_vcpu(pit);
+
+    ASSERT(spin_is_locked(&pit->lock));
 
     switch ( s->mode )
     {
@@ -165,6 +171,7 @@ static void pit_set_gate(PITState *pit, 
 
 int pit_get_gate(PITState *pit, int channel)
 {
+    ASSERT(spin_is_locked(&pit->lock));
     return pit->hw.channels[channel].gate;
 }
 
@@ -181,10 +188,15 @@ static void pit_load_count(PITState *pit
     struct periodic_time *pt = &pit->pt[channel];
     struct vcpu *v = vpit_vcpu(pit);
 
+    ASSERT(spin_is_locked(&pit->lock));
+
     if ( val == 0 )
         val = 0x10000;
 
-    pit->count_load_time[channel] = hvm_get_guest_time(pt->vcpu);
+    if ( v == NULL )
+        rdtscll(pit->count_load_time[channel]);
+    else
+        pit->count_load_time[channel] = hvm_get_guest_time(v);
     s->count = val;
     period = DIV_ROUND((val * 1000000000ULL), PIT_FREQ);
 
@@ -209,23 +221,29 @@ static void pit_load_count(PITState *pit
     }
 }
 
-static void pit_latch_count(PITState *s, int channel)
-{
-    struct hvm_hw_pit_channel *c = &s->hw.channels[channel];
+static void pit_latch_count(PITState *pit, int channel)
+{
+    struct hvm_hw_pit_channel *c = &pit->hw.channels[channel];
+
+    ASSERT(spin_is_locked(&pit->lock));
+
     if ( !c->count_latched )
     {
-        c->latched_count = pit_get_count(s, channel);
+        c->latched_count = pit_get_count(pit, channel);
         c->count_latched = c->rw_mode;
     }
 }
 
-static void pit_latch_status(PITState *s, int channel)
-{
-    struct hvm_hw_pit_channel *c = &s->hw.channels[channel];
+static void pit_latch_status(PITState *pit, int channel)
+{
+    struct hvm_hw_pit_channel *c = &pit->hw.channels[channel];
+
+    ASSERT(spin_is_locked(&pit->lock));
+
     if ( !c->status_latched )
     {
         /* TODO: Return NULL COUNT (bit 6). */
-        c->status = ((pit_get_out(s, channel) << 7) |
+        c->status = ((pit_get_out(pit, channel) << 7) |
                      (c->rw_mode << 4) |
                      (c->mode << 1) |
                      c->bcd);
@@ -240,6 +258,8 @@ static void pit_ioport_write(struct PITS
 
     val  &= 0xff;
     addr &= 3;
+
+    spin_lock(&pit->lock);
 
     if ( addr == 3 )
     {
@@ -304,6 +324,8 @@ static void pit_ioport_write(struct PITS
             break;
         }
     }
+
+    spin_unlock(&pit->lock);
 }
 
 static uint32_t pit_ioport_read(struct PITState *pit, uint32_t addr)
@@ -313,6 +335,8 @@ static uint32_t pit_ioport_read(struct P
     
     addr &= 3;
     s = &pit->hw.channels[addr];
+
+    spin_lock(&pit->lock);
 
     if ( s->status_latched )
     {
@@ -364,12 +388,16 @@ static uint32_t pit_ioport_read(struct P
         }
     }
 
+    spin_unlock(&pit->lock);
+
     return ret;
 }
 
 void pit_stop_channel0_irq(PITState *pit)
 {
+    spin_lock(&pit->lock);
     destroy_periodic_time(&pit->pt[0]);
+    spin_unlock(&pit->lock);
 }
 
 #ifdef HVM_DEBUG_SUSPEND
@@ -422,11 +450,18 @@ static int pit_save(struct domain *d, hv
 static int pit_save(struct domain *d, hvm_domain_context_t *h)
 {
     PITState *pit = domain_vpit(d);
+    int rc;
+
+    spin_lock(&pit->lock);
     
     pit_info(pit);
 
     /* Save the PIT hardware state */
-    return hvm_save_entry(PIT, 0, h, &pit->hw);
+    rc = hvm_save_entry(PIT, 0, h, &pit->hw);
+
+    spin_unlock(&pit->lock);
+
+    return rc;
 }
 
 static int pit_load(struct domain *d, hvm_domain_context_t *h)
@@ -434,9 +469,14 @@ static int pit_load(struct domain *d, hv
     PITState *pit = domain_vpit(d);
     int i;
 
+    spin_lock(&pit->lock);
+
     /* Restore the PIT hardware state */
     if ( hvm_load_entry(PIT, h, &pit->hw) )
+    {
+        spin_unlock(&pit->lock);
         return 1;
+    }
     
     /* Recreate platform timers from hardware state.  There will be some 
      * time jitter here, but the wall-clock will have jumped massively, so 
@@ -448,6 +488,9 @@ static int pit_load(struct domain *d, hv
     }
 
     pit_info(pit);
+
+    spin_unlock(&pit->lock);
+
     return 0;
 }
 
@@ -456,17 +499,15 @@ void pit_init(struct vcpu *v, unsigned l
 void pit_init(struct vcpu *v, unsigned long cpu_khz)
 {
     PITState *pit = vcpu_vpit(v);
-    struct periodic_time *pt;
     struct hvm_hw_pit_channel *s;
     int i;
 
-    pt = &pit->pt[0];  
-    pt[0].vcpu = v;
-    pt[1].vcpu = v;
-    pt[2].vcpu = v;
+    spin_lock_init(&pit->lock);
+
+    /* Some sub-functions assert that they are called with the lock held. */
+    spin_lock(&pit->lock);
 
     register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
-    /* register the speaker port */
     register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
     ticks_per_sec(v) = cpu_khz * (int64_t)1000;
 
@@ -477,6 +518,8 @@ void pit_init(struct vcpu *v, unsigned l
         s->gate = (i != 2);
         pit_load_count(pit, i, 0);
     }
+
+    spin_unlock(&pit->lock);
 }
 
 void pit_deinit(struct domain *d)
@@ -492,10 +535,10 @@ static int handle_pit_io(ioreq_t *p)
 
     if ( (p->size != 1) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
     {
-        gdprintk(XENLOG_WARNING, "HVM_PIT bad access\n");
+        gdprintk(XENLOG_WARNING, "PIT bad access\n");
         return 1;
     }
-    
+
     if ( p->dir == IOREQ_WRITE )
     {
         pit_ioport_write(vpit, p->addr, p->data);
@@ -505,7 +548,7 @@ static int handle_pit_io(ioreq_t *p)
         if ( (p->addr & 3) != 3 )
             p->data = pit_ioport_read(vpit, p->addr);
         else
-            gdprintk(XENLOG_WARNING, "HVM_PIT: read A1:A0=3!\n");
+            gdprintk(XENLOG_WARNING, "PIT: read A1:A0=3!\n");
     }
 
     return 1;
@@ -533,14 +576,18 @@ static int handle_speaker_io(ioreq_t *p)
 
     if ( (p->size != 1) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
     {
-        gdprintk(XENLOG_WARNING, "HVM_SPEAKER bad access\n");
+        gdprintk(XENLOG_WARNING, "PIT_SPEAKER bad access\n");
         return 1;
     }
+
+    spin_lock(&vpit->lock);
 
     if ( p->dir == IOREQ_WRITE )
         speaker_ioport_write(vpit, p->addr, p->data);
     else
         p->data = speaker_ioport_read(vpit, p->addr);
+
+    spin_unlock(&vpit->lock);
 
     return 1;
 }
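
Besides the locking, pit_load_count() above gains a fallback for the case
where the PIT is programmed before any vcpu exists (v == NULL now reads the
host TSC via rdtscll). The reload-period arithmetic it feeds is unchanged; a
worked example, assuming the classic i8254 input clock of 1193182 Hz and a
round-to-nearest DIV_ROUND (Xen's exact constants may differ slightly):

    #include <stdint.h>
    #include <stdio.h>

    #define PIT_FREQ 1193182ULL                 /* assumed input clock */
    #define DIV_ROUND(x, y) (((x) + (y) / 2) / (y))

    int main(void)
    {
        uint32_t val = 0;                       /* guest writes 0 ... */
        if (val == 0)
            val = 0x10000;                      /* ... meaning 65536 ticks */

        uint64_t period = DIV_ROUND(val * 1000000000ULL, PIT_FREQ);
        printf("%u ticks -> %llu ns (~%.2f ms)\n",
               val, (unsigned long long)period, period / 1e6);
        /* 65536 ticks -> ~54.93 ms, the classic ~18.2 Hz timer tick */
        return 0;
    }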
diff -r 3b51eebdf9ab -r 093bc9dcbbca xen/arch/x86/hvm/pmtimer.c
--- a/xen/arch/x86/hvm/pmtimer.c        Fri Jun 15 13:33:11 2007 -0600
+++ b/xen/arch/x86/hvm/pmtimer.c        Sat Jun 16 10:42:06 2007 +0100
@@ -53,6 +53,8 @@
 /* Dispatch SCIs based on the PM1a_STS and PM1a_EN registers */
 static void pmt_update_sci(PMTState *s)
 {
+    ASSERT(spin_is_locked(&s->lock));
+
     if ( s->pm.pm1a_en & s->pm.pm1a_sts & SCI_MASK )
         hvm_isa_irq_assert(s->vcpu->domain, SCI_IRQ);
     else
@@ -66,6 +68,8 @@ static void pmt_update_time(PMTState *s)
     uint64_t curr_gtime;
     uint32_t msb = s->pm.tmr_val & TMR_VAL_MSB;
     
+    ASSERT(spin_is_locked(&s->lock));
+
     /* Update the timer */
     curr_gtime = hvm_get_guest_time(s->vcpu);
     s->pm.tmr_val += ((curr_gtime - s->last_gtime) * s->scale) >> 32;
@@ -89,6 +93,8 @@ static void pmt_timer_callback(void *opa
     uint32_t pmt_cycles_until_flip;
     uint64_t time_until_flip;
 
+    spin_lock(&s->lock);
+
     /* Recalculate the timer and make sure we get an SCI if we need one */
     pmt_update_time(s);
 
@@ -103,8 +109,9 @@ static void pmt_timer_callback(void *opa
 
     /* Wake up again near the next bit-flip */
     set_timer(&s->timer, NOW() + time_until_flip + MILLISECS(1));
-}
-
+
+    spin_unlock(&s->lock);
+}
 
 /* Handle port I/O to the PM1a_STS and PM1a_EN registers */
 static int handle_evt_io(ioreq_t *p)
@@ -114,7 +121,9 @@ static int handle_evt_io(ioreq_t *p)
     uint32_t addr, data, byte;
     int i;
 
-    if ( p->dir == 0 ) /* Write */
+    spin_lock(&s->lock);
+
+    if ( p->dir == IOREQ_WRITE )
     {
         /* Handle this I/O one byte at a time */
         for ( i = p->size, addr = p->addr, data = p->data;
@@ -122,7 +131,7 @@ static int handle_evt_io(ioreq_t *p)
               i--, addr++, data >>= 8 )
         {
             byte = data & 0xff;
-            switch(addr) 
+            switch ( addr )
             {
                 /* PM1a_STS register bits are write-to-clear */
             case PM1a_STS_ADDR:
@@ -149,7 +158,7 @@ static int handle_evt_io(ioreq_t *p)
         /* Fix up the SCI state to match the new register state */
         pmt_update_sci(s);
     }
-    else /* Read */
+    else /* p->dir == IOREQ_READ */
     {
         data = s->pm.pm1a_sts | (((uint32_t) s->pm.pm1a_en) << 16);
         data >>= 8 * (p->addr - PM1a_STS_ADDR);
@@ -157,6 +166,9 @@ static int handle_evt_io(ioreq_t *p)
         else if ( p->size == 2 ) data &= 0xffff;
         p->data = data;
     }
+
+    spin_unlock(&s->lock);
+
     return 1;
 }
 
@@ -167,29 +179,31 @@ static int handle_pmt_io(ioreq_t *p)
     struct vcpu *v = current;
     PMTState *s = &v->domain->arch.hvm_domain.pl_time.vpmt;
 
-    if (p->size != 4 ||
-        p->data_is_ptr ||
-        p->type != IOREQ_TYPE_PIO){
-        printk("HVM_PMT: wrong PM timer IO\n");
+    if ( (p->size != 4) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
+    {
+        gdprintk(XENLOG_WARNING, "HVM_PMT bad access\n");
         return 1;
     }
     
-    if (p->dir == 0) { /* write */
-        /* PM_TMR_BLK is read-only */
-        return 1;
-    } else if (p->dir == 1) { /* read */
+    if ( p->dir == IOREQ_READ )
+    {
+        spin_lock(&s->lock);
         pmt_update_time(s);
         p->data = s->pm.tmr_val;
+        spin_unlock(&s->lock);
         return 1;
     }
+
     return 0;
 }
 
 static int pmtimer_save(struct domain *d, hvm_domain_context_t *h)
 {
     PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
-    uint32_t msb = s->pm.tmr_val & TMR_VAL_MSB;
-    uint32_t x;
+    uint32_t x, msb = s->pm.tmr_val & TMR_VAL_MSB;
+    int rc;
+
+    spin_lock(&s->lock);
 
     /* Update the counter to the guest's current time.  We always save
      * with the domain paused, so the saved time should be after the
@@ -202,22 +216,33 @@ static int pmtimer_save(struct domain *d
     /* No point in setting the SCI here because we'll already have saved the 
      * IRQ and *PIC state; we'll fix it up when we restore the domain */
 
-    return hvm_save_entry(PMTIMER, 0, h, &s->pm);
+    rc = hvm_save_entry(PMTIMER, 0, h, &s->pm);
+
+    spin_unlock(&s->lock);
+
+    return rc;
 }
 
 static int pmtimer_load(struct domain *d, hvm_domain_context_t *h)
 {
     PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
+
+    spin_lock(&s->lock);
 
     /* Reload the registers */
     if ( hvm_load_entry(PMTIMER, h, &s->pm) )
+    {
+        spin_unlock(&s->lock);
         return -EINVAL;
+    }
 
     /* Calculate future counter values from now. */
     s->last_gtime = hvm_get_guest_time(s->vcpu);
 
     /* Set the SCI state from the registers */ 
     pmt_update_sci(s);
+
+    spin_unlock(&s->lock);
     
     return 0;
 }
@@ -225,14 +250,11 @@ HVM_REGISTER_SAVE_RESTORE(PMTIMER, pmtim
 HVM_REGISTER_SAVE_RESTORE(PMTIMER, pmtimer_save, pmtimer_load, 
                           1, HVMSR_PER_DOM);
 
-
 void pmtimer_init(struct vcpu *v)
 {
     PMTState *s = &v->domain->arch.hvm_domain.pl_time.vpmt;
 
-    s->pm.tmr_val = 0;
-    s->pm.pm1a_sts = 0;
-    s->pm.pm1a_en = 0;
+    spin_lock_init(&s->lock);
 
     s->scale = ((uint64_t)FREQUENCE_PMTIMER << 32) / ticks_per_sec(v);
     s->vcpu = v;
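
The PM timer's rate conversion, set up in pmtimer_init() and consumed by
pmt_update_time() above, is 32.32 fixed point: scale = (3579545 << 32) /
tsc_freq, then ticks = (delta * scale) >> 32. A standalone sketch, with an
assumed 2 GHz guest time base (any real frequency works the same way):

    #include <stdint.h>
    #include <stdio.h>

    #define FREQUENCE_PMTIMER 3579545ULL   /* ACPI PM timer: 3.579545 MHz */

    int main(void)
    {
        uint64_t tsc_freq = 2000000000ULL;             /* assumed 2 GHz */
        uint64_t scale = (FREQUENCE_PMTIMER << 32) / tsc_freq;

        /* One second's worth of guest time; the multiply fits in 64 bits
         * as long as deltas stay modest, which frequent updates ensure. */
        uint64_t delta = tsc_freq;
        uint64_t ticks = (delta * scale) >> 32;

        printf("1s -> %llu PM ticks\n", (unsigned long long)ticks);
        /* expect ~3579545, i.e. the PM timer frequency */
        return 0;
    }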
diff -r 3b51eebdf9ab -r 093bc9dcbbca xen/arch/x86/hvm/rtc.c
--- a/xen/arch/x86/hvm/rtc.c    Fri Jun 15 13:33:11 2007 -0600
+++ b/xen/arch/x86/hvm/rtc.c    Sat Jun 16 10:42:06 2007 +0100
@@ -34,10 +34,12 @@
                                        arch.hvm_domain.pl_time.vrtc))
 #define vrtc_vcpu(rtc)   (vrtc_domain(rtc)->vcpu[0])
 
-void rtc_periodic_cb(struct vcpu *v, void *opaque)
+static void rtc_periodic_cb(struct vcpu *v, void *opaque)
 {
     RTCState *s = opaque;
+    spin_lock(&s->lock);
     s->hw.cmos_data[RTC_REG_C] |= 0xc0;
+    spin_unlock(&s->lock);
 }
 
 int is_rtc_periodic_irq(void *opaque)
@@ -54,6 +56,8 @@ static void rtc_timer_update(RTCState *s
 {
     int period_code, period;
     struct vcpu *v = vrtc_vcpu(s);
+
+    ASSERT(spin_is_locked(&s->lock));
 
     period_code = s->hw.cmos_data[RTC_REG_A] & RTC_RATE_SELECT;
     if ( (period_code != 0) && (s->hw.cmos_data[RTC_REG_B] & RTC_PIE) )
@@ -78,14 +82,21 @@ static int rtc_ioport_write(void *opaque
 {
     RTCState *s = opaque;
 
+    spin_lock(&s->lock);
+
     if ( (addr & 1) == 0 )
     {
-        s->hw.cmos_index = data & 0x7f;
-        return (s->hw.cmos_index < RTC_CMOS_SIZE);
+        data &= 0x7f;
+        s->hw.cmos_index = data;
+        spin_unlock(&s->lock);
+        return (data < RTC_CMOS_SIZE);
     }
 
     if ( s->hw.cmos_index >= RTC_CMOS_SIZE )
+    {
+        spin_unlock(&s->lock);
         return 0;
+    }
 
     switch ( s->hw.cmos_index )
     {
@@ -134,6 +145,8 @@ static int rtc_ioport_write(void *opaque
         break;
     }
 
+    spin_unlock(&s->lock);
+
     return 1;
 }
 
@@ -158,6 +171,8 @@ static void rtc_set_time(RTCState *s)
     struct tm *tm = &s->current_tm;
     unsigned long before, after; /* XXX s_time_t */
       
+    ASSERT(spin_is_locked(&s->lock));
+
     before = mktime(tm->tm_year, tm->tm_mon, tm->tm_mday,
                    tm->tm_hour, tm->tm_min, tm->tm_sec);
     
@@ -182,6 +197,8 @@ static void rtc_copy_date(RTCState *s)
     const struct tm *tm = &s->current_tm;
     struct domain *d = vrtc_domain(s);
 
+    ASSERT(spin_is_locked(&s->lock));
+
     if ( s->time_offset_seconds != d->time_offset_seconds )
     {
         s->current_tm = gmtime(get_localtime(d));
@@ -230,6 +247,8 @@ static void rtc_next_second(RTCState *s)
     struct tm *tm = &s->current_tm;
     int days_in_month;
     struct domain *d = vrtc_domain(s);
+
+    ASSERT(spin_is_locked(&s->lock));
 
     if ( s->time_offset_seconds != d->time_offset_seconds )
     {
@@ -279,6 +298,8 @@ static void rtc_update_second(void *opaq
 {
     RTCState *s = opaque;
 
+    spin_lock(&s->lock);
+
     /* if the oscillator is not in normal operation, we do not update */
     if ( (s->hw.cmos_data[RTC_REG_A] & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ )
     {
@@ -295,12 +316,16 @@ static void rtc_update_second(void *opaq
         /* Delay time before update cycle */
         set_timer(&s->second_timer2, s->next_second_time + 244000);
     }
+
+    spin_unlock(&s->lock);
 }
 
 static void rtc_update_second2(void *opaque)
 {
     RTCState *s = opaque;
     struct domain *d = vrtc_domain(s);
+
+    spin_lock(&s->lock);
 
     if ( !(s->hw.cmos_data[RTC_REG_B] & RTC_SET) )
         rtc_copy_date(s);
@@ -337,15 +362,18 @@ static void rtc_update_second2(void *opa
 
     s->next_second_time += 1000000000ULL;
     set_timer(&s->second_timer, s->next_second_time);
-}
-
-static uint32_t rtc_ioport_read(void *opaque, uint32_t addr)
-{
-    RTCState *s = opaque;
+
+    spin_unlock(&s->lock);
+}
+
+static uint32_t rtc_ioport_read(RTCState *s, uint32_t addr)
+{
     int ret;
 
     if ( (addr & 1) == 0 )
         return 0xff;
+
+    spin_lock(&s->lock);
 
     switch ( s->hw.cmos_index )
     {
@@ -371,6 +399,8 @@ static uint32_t rtc_ioport_read(void *op
         break;
     }
 
+    spin_unlock(&s->lock);
+
     return ret;
 }
 
@@ -413,7 +443,11 @@ static int rtc_save(struct domain *d, hv
 static int rtc_save(struct domain *d, hvm_domain_context_t *h)
 {
     RTCState *s = domain_vrtc(d);
-    return hvm_save_entry(RTC, 0, h, &s->hw);
+    int rc;
+    spin_lock(&s->lock);
+    rc = hvm_save_entry(RTC, 0, h, &s->hw);
+    spin_unlock(&s->lock);
+    return rc;
 }
 
 /* Reload the hardware state from a saved domain */
@@ -421,9 +455,14 @@ static int rtc_load(struct domain *d, hv
 {
     RTCState *s = domain_vrtc(d);
 
+    spin_lock(&s->lock);
+
     /* Restore the registers */
     if ( hvm_load_entry(RTC, h, &s->hw) != 0 )
+    {
+        spin_unlock(&s->lock);
         return -EINVAL;
+    }
 
     /* Reset the wall-clock time.  In normal running, this runs with host 
      * time, so let's keep doing that. */
@@ -436,6 +475,8 @@ static int rtc_load(struct domain *d, hv
     /* Reset the periodic interrupt timer based on the registers */
     rtc_timer_update(s);
 
+    spin_unlock(&s->lock);
+
     return 0;
 }
 
@@ -445,6 +486,8 @@ void rtc_init(struct vcpu *v, int base)
 void rtc_init(struct vcpu *v, int base)
 {
     RTCState *s = vcpu_vrtc(v);
+
+    spin_lock_init(&s->lock);
 
     s->hw.cmos_data[RTC_REG_A] = RTC_REF_CLCK_32KHZ | 6; /* ~1kHz */
     s->hw.cmos_data[RTC_REG_B] = RTC_24H;
@@ -452,7 +495,10 @@ void rtc_init(struct vcpu *v, int base)
     s->hw.cmos_data[RTC_REG_D] = RTC_VRT;
 
     s->current_tm = gmtime(get_localtime(v->domain));
+
+    spin_lock(&s->lock);
     rtc_copy_date(s);
+    spin_unlock(&s->lock);
 
     init_timer(&s->second_timer, rtc_update_second, s, v->processor);
     init_timer(&s->second_timer2, rtc_update_second2, s, v->processor);
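
The rtc_ioport_write() rework above also makes the CMOS index/data protocol
easier to see: a write to the even port latches a 7-bit register index
(masked before the range check, so the return value no longer depends on
state re-read after the unlock), and the odd port accesses the selected
register. A minimal sketch of that register-pair idiom (illustrative names
and size, not Xen's):

    #include <stdint.h>
    #include <stdio.h>

    #define CMOS_SIZE 14                  /* illustrative register count */

    struct cmos {
        uint8_t index;
        uint8_t data[CMOS_SIZE];
    };

    static int cmos_write(struct cmos *c, uint32_t addr, uint8_t val)
    {
        if ((addr & 1) == 0) {            /* even port: latch the index */
            c->index = val & 0x7f;
            return c->index < CMOS_SIZE;
        }
        if (c->index >= CMOS_SIZE)        /* odd port: data access */
            return 0;
        c->data[c->index] = val;
        return 1;
    }

    int main(void)
    {
        struct cmos c = { 0 };
        cmos_write(&c, 0x70, 0x0b);       /* select register B */
        cmos_write(&c, 0x71, 0x02);       /* write the 24-hour bit */
        printf("reg B = %#x\n", c.data[0x0b]);
        return 0;
    }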
diff -r 3b51eebdf9ab -r 093bc9dcbbca xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Fri Jun 15 13:33:11 2007 -0600
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Sat Jun 16 10:42:06 2007 +0100
@@ -236,6 +236,16 @@ static int construct_vmcb(struct vcpu *v
         vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
         vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
         vmcb->cr4 = arch_svm->cpu_shadow_cr4 = 0;
+
+        /* No point in intercepting CR0/3/4 reads, because the hardware 
+         * will return the guest versions anyway. */
+        vmcb->cr_intercepts &= ~(CR_INTERCEPT_CR0_READ
+                                 |CR_INTERCEPT_CR3_READ
+                                 |CR_INTERCEPT_CR4_READ);
+
+        /* No point in intercepting INVLPG if we don't have shadow pagetables 
+         * that need to be fixed up. */
+        vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_INVLPG;
     }
     else
     {
diff -r 3b51eebdf9ab -r 093bc9dcbbca xen/arch/x86/hvm/vpt.c
--- a/xen/arch/x86/hvm/vpt.c    Fri Jun 15 13:33:11 2007 -0600
+++ b/xen/arch/x86/hvm/vpt.c    Sat Jun 16 10:42:06 2007 +0100
@@ -17,11 +17,31 @@
  * Place - Suite 330, Boston, MA 02111-1307 USA.
  *
  */
+
 #include <xen/time.h>
 #include <asm/hvm/support.h>
 #include <asm/hvm/vpt.h>
 #include <asm/event.h>
 
+static void pt_lock(struct periodic_time *pt)
+{
+    struct vcpu *v;
+
+    for ( ; ; )
+    {
+        v = pt->vcpu;
+        spin_lock(&v->arch.hvm_vcpu.tm_lock);
+        if ( likely(pt->vcpu == v) )
+            break;
+        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+    }
+}
+
+static void pt_unlock(struct periodic_time *pt)
+{
+    spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
+}
+
 static void missed_ticks(struct periodic_time *pt)
 {
     s_time_t missed_ticks;
@@ -52,16 +72,22 @@ void pt_freeze_time(struct vcpu *v)
     if ( test_bit(_VPF_blocked, &v->pause_flags) )
         return;
 
+    spin_lock(&v->arch.hvm_vcpu.tm_lock);
+
     v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
 
     list_for_each_entry ( pt, head, list )
         stop_timer(&pt->timer);
+
+    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 }
 
 void pt_thaw_time(struct vcpu *v)
 {
     struct list_head *head = &v->arch.hvm_vcpu.tm_list;
     struct periodic_time *pt;
+
+    spin_lock(&v->arch.hvm_vcpu.tm_lock);
 
     if ( v->arch.hvm_vcpu.guest_time )
     {
@@ -74,11 +100,15 @@ void pt_thaw_time(struct vcpu *v)
             set_timer(&pt->timer, pt->scheduled);
         }
     }
+
+    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 }
 
 static void pt_timer_fn(void *data)
 {
     struct periodic_time *pt = data;
+
+    pt_lock(pt);
 
     pt->pending_intr_nr++;
     pt->scheduled += pt->period;
@@ -89,6 +119,8 @@ static void pt_timer_fn(void *data)
         set_timer(&pt->timer, pt->scheduled);
 
     vcpu_kick(pt->vcpu);
+
+    pt_unlock(pt);
 }
 
 void pt_update_irq(struct vcpu *v)
@@ -98,6 +130,8 @@ void pt_update_irq(struct vcpu *v)
     uint64_t max_lag = -1ULL;
     int irq = -1;
 
+    spin_lock(&v->arch.hvm_vcpu.tm_lock);
+
     list_for_each_entry ( pt, head, list )
     {
         if ( !is_isa_irq_masked(v, pt->irq) && pt->pending_intr_nr &&
@@ -108,6 +142,8 @@ void pt_update_irq(struct vcpu *v)
         }
     }
 
+    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+
     if ( is_lvtt(v, irq) )
     {
         vlapic_set_irq(vcpu_vlapic(v), irq, 0);
@@ -119,7 +155,7 @@ void pt_update_irq(struct vcpu *v)
     }
 }
 
-struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type)
+static struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type)
 {
     struct list_head *head = &v->arch.hvm_vcpu.tm_list;
     struct periodic_time *pt;
@@ -152,25 +188,42 @@ struct periodic_time *is_pt_irq(struct v
 
 void pt_intr_post(struct vcpu *v, int vector, int type)
 {
-    struct periodic_time *pt = is_pt_irq(v, vector, type);
-
+    struct periodic_time *pt;
+    time_cb *cb;
+    void *cb_priv;
+
+    spin_lock(&v->arch.hvm_vcpu.tm_lock);
+
+    pt = is_pt_irq(v, vector, type);
     if ( pt == NULL )
-        return;
+    {
+        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+        return;
+    }
+
+    ASSERT(pt->vcpu == v);
 
     pt->pending_intr_nr--;
     pt->last_plt_gtime += pt->period_cycles;
 
-    if ( hvm_get_guest_time(pt->vcpu) < pt->last_plt_gtime )
-        hvm_set_guest_time(pt->vcpu, pt->last_plt_gtime);
-
-    if ( pt->cb != NULL )
-        pt->cb(pt->vcpu, pt->priv);
+    if ( hvm_get_guest_time(v) < pt->last_plt_gtime )
+        hvm_set_guest_time(v, pt->last_plt_gtime);
+
+    cb = pt->cb;
+    cb_priv = pt->priv;
+
+    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+
+    if ( cb != NULL )
+        cb(v, cb_priv);
 }
 
 void pt_reset(struct vcpu *v)
 {
     struct list_head *head = &v->arch.hvm_vcpu.tm_list;
     struct periodic_time *pt;
+
+    spin_lock(&v->arch.hvm_vcpu.tm_lock);
 
     list_for_each_entry ( pt, head, list )
     {
@@ -182,18 +235,24 @@ void pt_reset(struct vcpu *v)
             set_timer(&pt->timer, pt->scheduled);
         }
     }
+
+    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 }
 
 void pt_migrate(struct vcpu *v)
 {
     struct list_head *head = &v->arch.hvm_vcpu.tm_list;
     struct periodic_time *pt;
+
+    spin_lock(&v->arch.hvm_vcpu.tm_lock);
 
     list_for_each_entry ( pt, head, list )
     {
         if ( pt->enabled )
             migrate_timer(&pt->timer, v->processor);
     }
+
+    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 }
 
 void create_periodic_time(
@@ -201,6 +260,8 @@ void create_periodic_time(
     uint8_t irq, char one_shot, time_cb *cb, void *data)
 {
     destroy_periodic_time(pt);
+
+    spin_lock(&v->arch.hvm_vcpu.tm_lock);
 
     init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
     pt->enabled = 1;
@@ -223,6 +284,8 @@ void create_periodic_time(
 
     list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);
     set_timer(&pt->timer, pt->scheduled);
+
+    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 }
 
 void destroy_periodic_time(struct periodic_time *pt)
@@ -230,8 +293,10 @@ void destroy_periodic_time(struct period
     if ( !pt->enabled )
         return;
 
+    pt_lock(pt);
     pt->enabled = 0;
     pt->pending_intr_nr = 0;
     list_del(&pt->list);
     kill_timer(&pt->timer);
-}
+    pt_unlock(pt);
+}
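
Two patterns in vpt.c above are worth spelling out. First, pt_lock() cannot
simply take pt->vcpu's tm_lock, because the timer's owning vcpu may change
while we wait for it; it snapshots the owner, locks, re-validates, and
retries on a mismatch. Second, pt_intr_post() now copies the callback and
its argument, drops the lock, and only then invokes it, since callbacks such
as rtc_periodic_cb take their own device lock. A compilable sketch of the
lock/re-validate loop, with pthread mutexes standing in for Xen's tm_lock:

    #include <pthread.h>

    struct vcpu {
        pthread_mutex_t tm_lock;
    };

    struct periodic_time {
        struct vcpu *vcpu;   /* may be changed by whoever holds the lock */
    };

    static void pt_lock(struct periodic_time *pt)
    {
        for (;;) {
            struct vcpu *v = pt->vcpu;           /* snapshot current owner */
            pthread_mutex_lock(&v->tm_lock);
            if (pt->vcpu == v)                   /* still the owner: done */
                break;
            pthread_mutex_unlock(&v->tm_lock);   /* it moved: retry */
        }
    }

    static void pt_unlock(struct periodic_time *pt)
    {
        pthread_mutex_unlock(&pt->vcpu->tm_lock);
    }

    int main(void)
    {
        struct vcpu v = { PTHREAD_MUTEX_INITIALIZER };
        struct periodic_time pt = { &v };
        pt_lock(&pt);
        pt_unlock(&pt);
        return 0;
    }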
diff -r 3b51eebdf9ab -r 093bc9dcbbca xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Fri Jun 15 13:33:11 2007 -0600
+++ b/xen/arch/x86/mm/hap/hap.c Sat Jun 16 10:42:06 2007 +0100
@@ -55,14 +55,14 @@
 /* hap code to call when log_dirty is enable. return 0 if no problem found. */
 int hap_enable_log_dirty(struct domain *d)
 {
+    /* turn on PG_log_dirty bit in paging mode */
     hap_lock(d);
-    /* turn on PG_log_dirty bit in paging mode */
     d->arch.paging.mode |= PG_log_dirty;
+    hap_unlock(d);
+
     /* set l1e entries of P2M table to NOT_WRITABLE. */
     p2m_set_flags_global(d, (_PAGE_PRESENT|_PAGE_USER));
-    flush_tlb_all_pge();
-    hap_unlock(d);
-
+    flush_tlb_mask(d->domain_dirty_cpumask);
     return 0;
 }
 
@@ -70,19 +70,20 @@ int hap_disable_log_dirty(struct domain 
 {
     hap_lock(d);
     d->arch.paging.mode &= ~PG_log_dirty;
+    hap_unlock(d);
+
     /* set l1e entries of P2M table with normal mode */
-    p2m_set_flags_global(d, __PAGE_HYPERVISOR|_PAGE_USER);
-    hap_unlock(d);
-    
-    return 1;
+    p2m_set_flags_global(d, __PAGE_HYPERVISOR|_PAGE_USER);    
+    return 0;
 }
 
 void hap_clean_dirty_bitmap(struct domain *d)
 {
     /* mark physical memory as NOT_WRITEABLE and flush the TLB */
     p2m_set_flags_global(d, (_PAGE_PRESENT|_PAGE_USER));
-    flush_tlb_all_pge();
-}
+    flush_tlb_mask(d->domain_dirty_cpumask);
+}
+
 /************************************************/
 /*             HAP SUPPORT FUNCTIONS            */
 /************************************************/
@@ -268,6 +269,7 @@ void hap_install_xen_entries_in_l2h(stru
 {
     struct domain *d = v->domain;
     l2_pgentry_t *sl2e;
+    l3_pgentry_t *p2m;
 
     int i;
 
@@ -290,23 +292,18 @@ void hap_install_xen_entries_in_l2h(stru
         sl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
             l2e_empty();
 
-    if ( paging_mode_translate(d) )
+    /* Install the domain-specific p2m table */
+    ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
+    p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
+    for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
     {
-        /* Install the domain-specific p2m table */
-        l3_pgentry_t *p2m;
-        ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
-        p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
-        for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
-        {
-            sl2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
-                (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
-                ? l2e_from_pfn(mfn_x(_mfn(l3e_get_pfn(p2m[i]))),
-                                      __PAGE_HYPERVISOR)
-                : l2e_empty();
-        }
-        hap_unmap_domain_page(p2m);
-    }
-
+        sl2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
+            (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
+            ? l2e_from_pfn(mfn_x(_mfn(l3e_get_pfn(p2m[i]))),
+                           __PAGE_HYPERVISOR)
+            : l2e_empty();
+    }
+    hap_unmap_domain_page(p2m);
     hap_unmap_domain_page(sl2e);
 }
 #endif
@@ -565,61 +562,37 @@ void hap_vcpu_init(struct vcpu *v)
 /************************************************/
 /*          HAP PAGING MODE FUNCTIONS           */
 /************************************************/
-/* In theory, hap should not intercept guest page fault. This function can 
- * be recycled to handle host/nested page fault, if needed.
+/* 
+ * HAP guests can handle page faults (in the guest page tables) without
+ * needing any action from Xen, so we should not be intercepting them.
  */
 int hap_page_fault(struct vcpu *v, unsigned long va, 
                    struct cpu_user_regs *regs)
 {
-    HERE_I_AM;
+    HAP_ERROR("Intercepted a guest #PF (%u:%u) with HAP enabled.\n",
+              v->domain->domain_id, v->vcpu_id);
     domain_crash(v->domain);
     return 0;
 }
 
-/* called when guest issues a invlpg request. 
- * Return 1 if need to issue page invalidation on CPU; Return 0 if does not
- * need to do so.
+/* 
+ * HAP guests can handle invlpg without needing any action from Xen, so
+ * should not be intercepting it. 
  */
 int hap_invlpg(struct vcpu *v, unsigned long va)
 {
-    HERE_I_AM;
+    HAP_ERROR("Intercepted a guest INVLPG (%u:%u) with HAP enabled.\n",
+              v->domain->domain_id, v->vcpu_id);
+    domain_crash(v->domain);
     return 0;
 }
 
+/*
+ * HAP guests do not need to take any action on CR3 writes (they are still
+ * intercepted, so that Xen's copy of the guest's CR3 can be kept in sync.)
+ */
 void hap_update_cr3(struct vcpu *v, int do_locking)
 {
-    struct domain *d = v->domain;
-    mfn_t gmfn;
-
-    HERE_I_AM;
-    /* Don't do anything on an uninitialised vcpu */
-    if ( !is_hvm_domain(d) && !v->is_initialised )
-    {
-        ASSERT(v->arch.cr3 == 0);
-        return;
-    }
-
-    if ( do_locking )
-        hap_lock(v->domain);
-    
-    ASSERT(hap_locked_by_me(v->domain));
-    ASSERT(v->arch.paging.mode);
-    
-    gmfn = pagetable_get_mfn(v->arch.guest_table);
-
-    make_cr3(v, pagetable_get_pfn(v->arch.monitor_table));
-    
-    hvm_update_guest_cr3(v, pagetable_get_paddr(v->arch.monitor_table));
-
-    HAP_PRINTK("d=%u v=%u guest_table=%05lx, monitor_table = %05lx\n", 
-               d->domain_id, v->vcpu_id, 
-               (unsigned long)pagetable_get_pfn(v->arch.guest_table),
-               (unsigned long)pagetable_get_pfn(v->arch.monitor_table));
-
-    flush_tlb_mask(d->domain_dirty_cpumask);
-
-    if ( do_locking )
-        hap_unlock(v->domain);
 }
 
 void hap_update_paging_modes(struct vcpu *v)
@@ -647,7 +620,7 @@ void hap_update_paging_modes(struct vcpu
         v->arch.paging.mode = &hap_paging_real_mode;
     }
 
-    v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);    
+    v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
 
     if ( pagetable_is_null(v->arch.monitor_table) ) {
         mfn_t mmfn = hap_make_monitor_table(v);
@@ -655,7 +628,6 @@ void hap_update_paging_modes(struct vcpu
         make_cr3(v, mfn_x(mmfn));
     }
 
-    flush_tlb_mask(d->domain_dirty_cpumask);
     hap_unlock(d);
 }
 
@@ -702,29 +674,18 @@ hap_write_p2m_entry(struct vcpu *v, unsi
 hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
                     l1_pgentry_t new, unsigned int level)
 {
-    int do_locking;
-
-    /* This function can be called from two directions (P2M and log dirty). We
-     *  need to make sure this lock has been held or not.
-     */
-    do_locking = !hap_locked_by_me(v->domain);
-
-    if ( do_locking )
-        hap_lock(v->domain);
+    hap_lock(v->domain);
 
     safe_write_pte(p, new);
 #if CONFIG_PAGING_LEVELS == 3
     /* install P2M in monitor table for PAE Xen */
-    if ( level == 3 ) {
+    if ( level == 3 ) 
        /* We have written to the p2m l3: need to sync the per-vcpu
          * copies of it in the monitor tables */
        p2m_install_entry_in_monitors(v->domain, (l3_pgentry_t *)p);
-       
-    }
 #endif
     
-    if ( do_locking )
-        hap_unlock(v->domain);
+    hap_unlock(v->domain);
 }
 
 /* Entry points into this mode of the hap code. */
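
The log-dirty changes above narrow the locked region to the paging-mode bit
flip and replace the global flush_tlb_all_pge() with a flush of just
d->domain_dirty_cpumask. The underlying mechanism: enabling log-dirty maps
the whole p2m with _PAGE_PRESENT|_PAGE_USER only (no write bit), so the next
guest write faults and the frame can be recorded. A toy sketch of that
fault-and-record idea (flat arrays stand in for the real p2m and dirty
bitmap; the re-enable-write step is the usual log-dirty flow rather than
anything shown in this diff):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_PRESENT 0x1
    #define PAGE_RW      0x2
    #define NR_FRAMES    8

    static uint64_t p2m[NR_FRAMES];
    static uint8_t dirty[NR_FRAMES];

    static void enable_log_dirty(void)
    {
        for (int i = 0; i < NR_FRAMES; i++)
            p2m[i] &= ~(uint64_t)PAGE_RW;  /* p2m_set_flags_global() analogue */
        memset(dirty, 0, sizeof(dirty));
        /* ...followed by a TLB flush of the domain's dirty cpumask */
    }

    static void guest_write_fault(int frame)
    {
        dirty[frame] = 1;                  /* log the frame... */
        p2m[frame] |= PAGE_RW;             /* ...then let writes proceed */
    }

    int main(void)
    {
        for (int i = 0; i < NR_FRAMES; i++)
            p2m[i] = PAGE_PRESENT | PAGE_RW;
        enable_log_dirty();
        guest_write_fault(3);
        printf("frame 3 dirty: %d\n", dirty[3]);
        return 0;
    }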
diff -r 3b51eebdf9ab -r 093bc9dcbbca xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Fri Jun 15 13:33:11 2007 -0600
+++ b/xen/arch/x86/mm/p2m.c     Sat Jun 16 10:42:06 2007 +0100
@@ -32,9 +32,13 @@
 #define P2M_AUDIT     0
 #define P2M_DEBUGGING 1
 
-/* The P2M lock.  This protects all updates to the p2m table.
+/*
+ * The P2M lock.  This protects all updates to the p2m table.
  * Updates are expected to be safe against concurrent reads, 
- * which do *not* require the lock */
+ * which do *not* require the lock.
+ *
+ * Locking discipline: always acquire this lock before the shadow or HAP one
+ */
 
 #define p2m_lock_init(_d)                            \
     do {                                             \
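
This new comment is also what makes the hap.c simplification above safe:
hap_write_p2m_entry() can take the HAP lock unconditionally because every
path that needs both locks acquires them in the same order, p2m first. A
minimal sketch of that ordering discipline (pthread mutexes standing in for
Xen's locks):

    #include <pthread.h>

    static pthread_mutex_t p2m_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t hap_lock = PTHREAD_MUTEX_INITIALIZER;

    static void write_p2m_entry(void)
    {
        pthread_mutex_lock(&hap_lock);     /* inner lock only, as the leaf */
        /* ... safe_write_pte() would go here ... */
        pthread_mutex_unlock(&hap_lock);
    }

    static void set_flags_global(void)
    {
        pthread_mutex_lock(&p2m_lock);     /* outer lock first, always */
        write_p2m_entry();
        pthread_mutex_unlock(&p2m_lock);
    }

    int main(void)
    {
        set_flags_global();
        return 0;
    }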
diff -r 3b51eebdf9ab -r 093bc9dcbbca xen/common/kexec.c
--- a/xen/common/kexec.c        Fri Jun 15 13:33:11 2007 -0600
+++ b/xen/common/kexec.c        Sat Jun 16 10:42:06 2007 +0100
@@ -19,6 +19,7 @@
 #include <asm/atomic.h>
 #include <xen/spinlock.h>
 #include <xen/version.h>
+#include <xen/console.h>
 #include <public/elfnote.h>
 
 #ifndef COMPAT
@@ -110,6 +111,8 @@ void kexec_crash(void)
     if ( !test_bit(KEXEC_IMAGE_CRASH_BASE + pos, &kexec_flags) )
         return;
 
+    console_start_sync();
+
     one_cpu_only();
     kexec_crash_save_cpu();
     machine_crash_shutdown();
diff -r 3b51eebdf9ab -r 093bc9dcbbca xen/common/timer.c
--- a/xen/common/timer.c        Fri Jun 15 13:33:11 2007 -0600
+++ b/xen/common/timer.c        Sat Jun 16 10:42:06 2007 +0100
@@ -183,7 +183,7 @@ static inline void timer_lock(struct tim
 
 static inline void timer_unlock(struct timer *timer)
 {
-        spin_unlock(&per_cpu(timers, timer->cpu).lock);
+    spin_unlock(&per_cpu(timers, timer->cpu).lock);
 }
 
 #define timer_unlock_irq(t) \
diff -r 3b51eebdf9ab -r 093bc9dcbbca xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h    Fri Jun 15 13:33:11 2007 -0600
+++ b/xen/include/asm-x86/hvm/vcpu.h    Sat Jun 16 10:42:06 2007 +0100
@@ -35,6 +35,9 @@ struct hvm_vcpu {
     struct vlapic       vlapic;
     s64                 cache_tsc_offset;
     u64                 guest_time;
+
+    /* Lock and list for virtual platform timers. */
+    spinlock_t          tm_lock;
     struct list_head    tm_list;
 
     /* For AP startup */
diff -r 3b51eebdf9ab -r 093bc9dcbbca xen/include/asm-x86/hvm/vpt.h
--- a/xen/include/asm-x86/hvm/vpt.h     Fri Jun 15 13:33:11 2007 -0600
+++ b/xen/include/asm-x86/hvm/vpt.h     Sat Jun 16 10:42:06 2007 +0100
@@ -31,7 +31,6 @@
 #include <asm/hvm/vpic.h>
 #include <public/hvm/save.h>
 
-
 struct HPETState;
 struct HPET_timer_fn_info {
     struct HPETState       *hs;
@@ -45,6 +44,7 @@ typedef struct HPETState {
     uint64_t mc_offset;
     struct timer timers[HPET_TIMER_NUM];
     struct HPET_timer_fn_info timer_fn_info[HPET_TIMER_NUM]; 
+    spinlock_t lock;
 } HPETState;
 
 
@@ -80,6 +80,7 @@ typedef struct PITState {
     int64_t count_load_time[3];
     /* irq handling */
     struct periodic_time pt[3];
+    spinlock_t lock;
 } PITState;
 
 typedef struct RTCState {
@@ -93,6 +94,7 @@ typedef struct RTCState {
     struct timer second_timer2;
     struct periodic_time pt;
     int32_t time_offset_seconds;
+    spinlock_t lock;
 } RTCState;
 
 #define FREQUENCE_PMTIMER  3579545  /* Timer should run at 3.579545 MHz */
@@ -102,6 +104,7 @@ typedef struct PMTState {
     uint64_t last_gtime;        /* Last (guest) time we updated the timer */
     uint64_t scale;             /* Multiplier to get from tsc to timer ticks */
     struct timer timer;         /* To make sure we send SCIs */
+    spinlock_t lock;
 } PMTState;
 
 struct pl_time {    /* platform time */
@@ -116,7 +119,6 @@ void pt_freeze_time(struct vcpu *v);
 void pt_freeze_time(struct vcpu *v);
 void pt_thaw_time(struct vcpu *v);
 void pt_update_irq(struct vcpu *v);
-struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type);
 void pt_intr_post(struct vcpu *v, int vector, int type);
 void pt_reset(struct vcpu *v);
 void pt_migrate(struct vcpu *v);
