
Re: [Xen-devel] [PATCH RFC v12 05/21] Introduce pv guest type and has_hvm_container macros



On 13/09/13 17:25, George Dunlap wrote:
The goal of this patch is to classify conditionals more clearly, according to
whether they relate to PV guests, HVM-only guests, or guests with an
"hvm container" (which will eventually include PVH).

This patch introduces an enum for guest type, as well as two new macros
for switching behavior on and off: is_pv_* and has_hvm_container_*.
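(The sched.h hunk isn't quoted below, but the new predicates amount to
roughly the following -- a sketch, not the verbatim patch:)

    enum guest_type {
        guest_type_pv, guest_type_hvm
        /* guest_type_pvh to be added by a later patch */
    };

    /* The per-domain is_hvm flag becomes a guest_type field, and the
     * predicates key off it: */
    #define is_pv_domain(d)             ((d)->guest_type == guest_type_pv)
    #define is_pv_vcpu(v)               (is_pv_domain((v)->domain))
    #define has_hvm_container_domain(d) ((d)->guest_type != guest_type_pv)
    #define has_hvm_container_vcpu(v)   (has_hvm_container_domain((v)->domain))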

In general, a switch should use is_pv_* (or !is_pv_*) if the code in question
relates directly to a PV guest.  Examples include use of pv_vcpu structs or
other behavior directly related to PV domains.
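
Concretely, the two predicates pair up like this at typical call sites
(adapted from the vcpu_initialise()/vcpu_destroy() hunks below):

    /* PV-specific state, e.g. pv_vcpu, wants is_pv_*: */
    if ( is_pv_vcpu(v) )
        xfree(v->arch.pv_vcpu.trap_ctxt);

    /* Core HVM machinery wants has_hvm_container_*, so that PVH
     * (once added) gets it too: */
    if ( has_hvm_container_vcpu(v) )
        hvm_vcpu_destroy(v);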

hvm_container is more of a fuzzy concept, but in general:

* Most core HVM behavior will be included in this.  Behavior not
appropriate for PVH mode will be disabled in later patches

* Hypercalls related to HVM guests will *not* be included by default;
functionality needed by PVH guests will be enabled in future patches

* The following functionality is not considered part of the HVM
container, and PVH will end up behaving like PV by default: event
channels, vtsc offset, code related to emulated timers, nested HVM,
emuirq, PoD

* Some features are left to implement for PVH later: vpmu, shadow mode

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
CC: Jan Beulich <jan.beulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
---
  xen/arch/x86/acpi/suspend.c        |    2 +-
  xen/arch/x86/cpu/mcheck/vmce.c     |    6 ++--
  xen/arch/x86/debug.c               |    2 +-
  xen/arch/x86/domain.c              |   54 ++++++++++++++++++------------------
  xen/arch/x86/domain_page.c         |   10 +++----
  xen/arch/x86/domctl.c              |   10 +++----
  xen/arch/x86/efi/runtime.c         |    4 +--
  xen/arch/x86/hvm/vmx/vmcs.c        |    4 +--
  xen/arch/x86/mm.c                  |    6 ++--
  xen/arch/x86/mm/shadow/common.c    |    6 ++--
  xen/arch/x86/mm/shadow/multi.c     |    7 +++--
  xen/arch/x86/physdev.c             |    4 +--
  xen/arch/x86/traps.c               |    5 ++--
  xen/arch/x86/x86_64/traps.c        |    8 +++---
  xen/common/domain.c                |    2 +-
  xen/common/grant_table.c           |    4 +--
  xen/common/kernel.c                |    2 +-
  xen/include/asm-x86/domain.h       |    2 +-
  xen/include/asm-x86/event.h        |    2 +-
  xen/include/asm-x86/guest_access.h |   12 ++++----
  xen/include/asm-x86/guest_pt.h     |    4 +--
  xen/include/xen/sched.h            |   14 ++++++++--
  xen/include/xen/tmem_xen.h         |    2 +-
  23 files changed, 91 insertions(+), 81 deletions(-)

diff --git a/xen/arch/x86/acpi/suspend.c b/xen/arch/x86/acpi/suspend.c
index c690b45..2b7aa3b 100644
--- a/xen/arch/x86/acpi/suspend.c
+++ b/xen/arch/x86/acpi/suspend.c
@@ -78,7 +78,7 @@ void restore_rest_processor_state(void)
      }

      /* Maybe load the debug registers. */
-    BUG_ON(is_hvm_vcpu(curr));
+    BUG_ON(has_hvm_container_vcpu(curr));
      if ( !is_idle_vcpu(curr) && curr->arch.debugreg[7] )
      {
          write_debugreg(0, curr->arch.debugreg[0]);
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index af3b491..f6c35db 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -83,7 +83,7 @@ int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt)
      {
          dprintk(XENLOG_G_ERR, "%s restore: unsupported MCA capabilities"
                  " %#" PRIx64 " for d%d:v%u (supported: %#Lx)\n",
-                is_hvm_vcpu(v) ? "HVM" : "PV", ctxt->caps,
+                has_hvm_container_vcpu(v) ? "HVM" : "PV", ctxt->caps,
                  v->domain->domain_id, v->vcpu_id,
                  guest_mcg_cap & ~MCG_CAP_COUNT);
          return -EPERM;
@@ -357,7 +357,7 @@ int inject_vmce(struct domain *d, int vcpu)
          if ( vcpu != VMCE_INJECT_BROADCAST && vcpu != v->vcpu_id )
              continue;

-        if ( (is_hvm_domain(d) ||
+        if ( (has_hvm_container_domain(d) ||
                guest_has_trap_callback(d, v->vcpu_id, TRAP_machine_check)) &&
               !test_and_set_bool(v->mce_pending) )
          {
@@ -439,7 +439,7 @@ int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
      if (!mfn_valid(mfn_x(mfn)))
          return -EINVAL;

-    if ( !is_hvm_domain(d) || !paging_mode_hap(d) )
+    if ( !has_hvm_container_domain(d) || !paging_mode_hap(d) )
          return -ENOSYS;

      rc = -1;
diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
index e67473e..3e21ca8 100644
--- a/xen/arch/x86/debug.c
+++ b/xen/arch/x86/debug.c
@@ -158,7 +158,7 @@ dbg_rw_guest_mem(dbgva_t addr, dbgbyte_t *buf, int len, struct domain *dp,
          pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len);

-        mfn = (dp->is_hvm
+        mfn = (has_hvm_container_domain(dp)
                 ? dbg_hvm_va2mfn(addr, dp, toaddr, &gfn)
                 : dbg_pv_va2mfn(addr, dp, pgd3));
          if ( mfn == INVALID_MFN )
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index a9e2383..5c38cb1 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -167,7 +167,7 @@ void dump_pageframe_info(struct domain *d)
          spin_unlock(&d->page_alloc_lock);
      }

-    if ( is_hvm_domain(d) )
+    if ( has_hvm_container_domain(d) )
          p2m_pod_dump_data(d);

      spin_lock(&d->page_alloc_lock);
@@ -385,7 +385,7 @@ int vcpu_initialise(struct vcpu *v)

      vmce_init_vcpu(v);

-    if ( is_hvm_domain(d) )
+    if ( has_hvm_container_domain(d) )
      {
          rc = hvm_vcpu_initialise(v);
          goto done;
@@ -438,7 +438,7 @@ int vcpu_initialise(struct vcpu *v)
      {
          vcpu_destroy_fpu(v);

-        if ( !is_hvm_domain(d) )
+        if ( is_pv_domain(d) )
              xfree(v->arch.pv_vcpu.trap_ctxt);
      }
@@ -452,7 +452,7 @@ void vcpu_destroy(struct vcpu *v)

      vcpu_destroy_fpu(v);

-    if ( is_hvm_vcpu(v) )
+    if ( has_hvm_container_vcpu(v) )
          hvm_vcpu_destroy(v);
      else
          xfree(v->arch.pv_vcpu.trap_ctxt);
@@ -464,7 +464,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
      int rc = -ENOMEM;

      d->arch.hvm_domain.hap_enabled =
-        is_hvm_domain(d) &&
+        has_hvm_container_domain(d) &&
          hvm_funcs.hap_supported &&
          (domcr_flags & DOMCRF_hap);
      d->arch.hvm_domain.mem_sharing_enabled = 0;
@@ -490,7 +490,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
                 d->domain_id);
      }

-    if ( is_hvm_domain(d) )
+    if ( has_hvm_container_domain(d) )
          rc = create_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0, NULL, NULL);
      else if ( is_idle_domain(d) )
          rc = 0;
@@ -512,7 +512,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
      mapcache_domain_init(d);

      HYPERVISOR_COMPAT_VIRT_START(d) =
-        is_hvm_domain(d) ? ~0u : __HYPERVISOR_COMPAT_VIRT_START;
+        is_pv_domain(d) ? __HYPERVISOR_COMPAT_VIRT_START : ~0u;

      if ( (rc = paging_domain_init(d, domcr_flags)) != 0 )
          goto fail;
@@ -554,7 +554,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
              goto fail;
      }

-    if ( is_hvm_domain(d) )
+    if ( has_hvm_container_domain(d) )
      {
          if ( (rc = hvm_domain_initialise(d)) != 0 )
          {
@@ -583,14 +583,14 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
      if ( paging_initialised )
          paging_final_teardown(d);
      free_perdomain_mappings(d);
-    if ( !is_hvm_domain(d) )
+    if ( is_pv_domain(d) )
          free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
      return rc;
  }

  void arch_domain_destroy(struct domain *d)
  {
-    if ( is_hvm_domain(d) )
+    if ( has_hvm_container_domain(d) )
          hvm_domain_destroy(d);
      else
          xfree(d->arch.pv_domain.e820);
@@ -602,7 +602,7 @@ void arch_domain_destroy(struct domain *d)
      paging_final_teardown(d);

      free_perdomain_mappings(d);
-    if ( !is_hvm_domain(d) )
+    if ( is_pv_domain(d) )
          free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);

      free_xenheap_page(d->shared_info);
@@ -653,7 +653,7 @@ int arch_set_info_guest(
  #define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
      flags = c(flags);

-    if ( !is_hvm_vcpu(v) )
+    if ( is_pv_vcpu(v) )
      {
          if ( !compat )
          {
@@ -706,7 +706,7 @@ int arch_set_info_guest(
      v->fpu_initialised = !!(flags & VGCF_I387_VALID);

      v->arch.flags &= ~TF_kernel_mode;
-    if ( (flags & VGCF_in_kernel) || is_hvm_vcpu(v)/*???*/ )
+    if ( (flags & VGCF_in_kernel) || has_hvm_container_vcpu(v)/*???*/ )
          v->arch.flags |= TF_kernel_mode;

      v->arch.vgc_flags = flags;
@@ -721,7 +721,7 @@ int arch_set_info_guest(
      if ( !compat )
      {
          memcpy(&v->arch.user_regs, &c.nat->user_regs, sizeof(c.nat->user_regs));
-        if ( !is_hvm_vcpu(v) )
+        if ( is_pv_vcpu(v) )
              memcpy(v->arch.pv_vcpu.trap_ctxt, c.nat->trap_ctxt,
                     sizeof(c.nat->trap_ctxt));
      }
@@ -737,7 +737,7 @@ int arch_set_info_guest(

      v->arch.user_regs.eflags |= 2;

-    if ( is_hvm_vcpu(v) )
+    if ( has_hvm_container_vcpu(v) )
      {
          hvm_set_info_guest(v);
          goto out;
@@ -966,7 +966,7 @@ int arch_set_info_guest(

  int arch_vcpu_reset(struct vcpu *v)
  {
-    if ( !is_hvm_vcpu(v) )
+    if ( is_pv_vcpu(v) )
      {
          destroy_gdt(v);
          return vcpu_destroy_pagetables(v);
@@ -1316,7 +1316,7 @@ static void update_runstate_area(struct vcpu *v)

  static inline int need_full_gdt(struct vcpu *v)
  {
-    return (!is_hvm_vcpu(v) && !is_idle_vcpu(v));
+    return (is_pv_vcpu(v) && !is_idle_vcpu(v));
  }

  static void __context_switch(void)
@@ -1438,9 +1438,9 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
      {
          __context_switch();

-        if ( !is_hvm_vcpu(next) &&
+        if ( is_pv_vcpu(next) &&
               (is_idle_vcpu(prev) ||
-              is_hvm_vcpu(prev) ||
+              has_hvm_container_vcpu(prev) ||
                is_pv_32on64_vcpu(prev) != is_pv_32on64_vcpu(next)) )
          {
              uint64_t efer = read_efer();
@@ -1451,13 +1451,13 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
          /* Re-enable interrupts before restoring state which may fault. */
          local_irq_enable();

-        if ( !is_hvm_vcpu(next) )
+        if ( is_pv_vcpu(next) )
          {
              load_LDT(next);
              load_segments(next);
          }

-        set_cpuid_faulting(!is_hvm_vcpu(next) &&
+        set_cpuid_faulting(is_pv_vcpu(next) &&
                             (next->domain->domain_id != 0));
      }
@@ -1540,7 +1540,7 @@ void hypercall_cancel_continuation(void)
      }
      else
      {
-        if ( !is_hvm_vcpu(current) )
+        if ( is_pv_vcpu(current) )
              regs->eip += 2; /* skip re-execute 'syscall' / 'int $xx' */
          else
              current->arch.hvm_vcpu.hcall_preempted = 0;
@@ -1577,12 +1577,12 @@ unsigned long hypercall_create_continuation(
          regs->eax  = op;

          /* Ensure the hypercall trap instruction is re-executed. */
-        if ( !is_hvm_vcpu(current) )
+        if ( is_pv_vcpu(current) )
              regs->eip -= 2;  /* re-execute 'syscall' / 'int $xx' */
          else
              current->arch.hvm_vcpu.hcall_preempted = 1;

-        if ( !is_hvm_vcpu(current) ?
+        if ( is_pv_vcpu(current) ?
               !is_pv_32on64_vcpu(current) :
               (hvm_guest_x86_mode(current) == 8) )
          {
@@ -1850,7 +1850,7 @@ int domain_relinquish_resources(struct domain *d)
                  return ret;
          }

-        if ( !is_hvm_domain(d) )
+        if ( is_pv_domain(d) )
          {
              for_each_vcpu ( d, v )
              {
@@ -1923,7 +1923,7 @@ int domain_relinquish_resources(struct domain *d)
          BUG();
      }

-    if ( is_hvm_domain(d) )
+    if ( has_hvm_container_domain(d) )
          hvm_domain_relinquish_resources(d);

      return 0;
@@ -2007,7 +2007,7 @@ void vcpu_mark_events_pending(struct vcpu *v)
      if ( already_pending )
          return;

-    if ( is_hvm_vcpu(v) )
+    if ( has_hvm_container_vcpu(v) )
          hvm_assert_evtchn_irq(v);
      else
          vcpu_kick(v);
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index bc18263..b9db322 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -35,7 +35,7 @@ static inline struct vcpu *mapcache_current_vcpu(void)
       * then it means we are running on the idle domain's page table and must
       * therefore use its mapcache.
       */
-    if ( unlikely(pagetable_is_null(v->arch.guest_table)) && !is_hvm_vcpu(v) )
+    if ( unlikely(pagetable_is_null(v->arch.guest_table)) && is_pv_vcpu(v) )
      {
          /* If we really are idling, perform lazy context switch now. */
          if ( (v = idle_vcpu[smp_processor_id()]) == current )
@@ -72,7 +72,7 @@ void *map_domain_page(unsigned long mfn)
  #endif

      v = mapcache_current_vcpu();
-    if ( !v || is_hvm_vcpu(v) )
+    if ( !v || has_hvm_container_vcpu(v) )
          return mfn_to_virt(mfn);

      dcache = &v->domain->arch.pv_domain.mapcache;
@@ -177,7 +177,7 @@ void unmap_domain_page(const void *ptr)
      ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);

      v = mapcache_current_vcpu();
-    ASSERT(v && !is_hvm_vcpu(v));
+    ASSERT(v && is_pv_vcpu(v));

      dcache = &v->domain->arch.pv_domain.mapcache;
      ASSERT(dcache->inuse);
@@ -244,7 +244,7 @@ int mapcache_domain_init(struct domain *d)
      struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
      unsigned int bitmap_pages;

-    if ( is_hvm_domain(d) || is_idle_domain(d) )
+    if ( has_hvm_container_domain(d) || is_idle_domain(d) )
          return 0;

  #ifdef NDEBUG
@@ -275,7 +275,7 @@ int mapcache_vcpu_init(struct vcpu *v)
      unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES;
      unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long));

-    if ( is_hvm_vcpu(v) || !dcache->inuse )
+    if ( has_hvm_container_vcpu(v) || !dcache->inuse )
          return 0;

      if ( ents > dcache->entries )
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index c2a04c4..e0e3fe7 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -800,7 +800,7 @@ long arch_do_domctl(
          if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
          {
              evc->size = sizeof(*evc);
-            if ( !is_hvm_domain(d) )
+            if ( is_pv_domain(d) )
              {
                  evc->sysenter_callback_cs      =
                      v->arch.pv_vcpu.sysenter_callback_cs;
@@ -833,7 +833,7 @@ long arch_do_domctl(
              ret = -EINVAL;
              if ( evc->size < offsetof(typeof(*evc), vmce) )
                  goto ext_vcpucontext_out;
-            if ( !is_hvm_domain(d) )
+            if ( is_pv_domain(d) )
              {
                  if ( !is_canonical_address(evc->sysenter_callback_eip) ||
                       !is_canonical_address(evc->syscall32_callback_eip) )
@@ -1237,7 +1237,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
      bool_t compat = is_pv_32on64_domain(v->domain);
  #define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld))

-    if ( is_hvm_vcpu(v) )
+    if ( has_hvm_container_vcpu(v) )
          memset(c.nat, 0, sizeof(*c.nat));
      memcpy(&c.nat->fpu_ctxt, v->arch.fpu_ctxt, sizeof(c.nat->fpu_ctxt));
      c(flags = v->arch.vgc_flags & ~(VGCF_i387_valid|VGCF_in_kernel));
@@ -1248,7 +1248,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
      if ( !compat )
      {
          memcpy(&c.nat->user_regs, &v->arch.user_regs, sizeof(c.nat->user_regs));
-        if ( !is_hvm_vcpu(v) )
+        if ( is_pv_vcpu(v) )
              memcpy(c.nat->trap_ctxt, v->arch.pv_vcpu.trap_ctxt,
                     sizeof(c.nat->trap_ctxt));
      }
@@ -1263,7 +1263,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
      for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
          c(debugreg[i] = v->arch.debugreg[i]);

-    if ( is_hvm_vcpu(v) )
+    if ( has_hvm_container_vcpu(v) )
      {
          struct segment_register sreg;
diff --git a/xen/arch/x86/efi/runtime.c b/xen/arch/x86/efi/runtime.c
index 37bb535..d7c884b 100644
--- a/xen/arch/x86/efi/runtime.c
+++ b/xen/arch/x86/efi/runtime.c
@@ -52,7 +52,7 @@ unsigned long efi_rs_enter(void)
      /* prevent fixup_page_fault() from doing anything */
      irq_enter();

-    if ( !is_hvm_vcpu(current) && !is_idle_vcpu(current) )
+    if ( is_pv_vcpu(current) && !is_idle_vcpu(current) )
      {
          struct desc_ptr gdt_desc = {
              .limit = LAST_RESERVED_GDT_BYTE,
@@ -71,7 +71,7 @@ unsigned long efi_rs_enter(void)
  void efi_rs_leave(unsigned long cr3)
  {
      write_cr3(cr3);
-    if ( !is_hvm_vcpu(current) && !is_idle_vcpu(current) )
+    if ( is_pv_vcpu(current) && !is_idle_vcpu(current) )
      {
          struct desc_ptr gdt_desc = {
              .limit = LAST_RESERVED_GDT_BYTE,
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 0620f87..7087630 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -637,7 +637,7 @@ void vmx_vmcs_exit(struct vcpu *v)
      {
          /* Don't confuse vmx_do_resume (for @v or @current!) */
          vmx_clear_vmcs(v);
-        if ( is_hvm_vcpu(current) )
+        if ( has_hvm_container_vcpu(current) )
              vmx_load_vmcs(current);

          spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
@@ -1477,7 +1477,7 @@ static void vmcs_dump(unsigned char ch)

      for_each_domain ( d )
      {
-        if ( !is_hvm_domain(d) )
+        if ( !has_hvm_container_domain(d) )
              continue;
          printk("\n>>> Domain %d <<<\n", d->domain_id);
          for_each_vcpu ( d, v )
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e7f0e13..120599a 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -181,7 +181,7 @@ static uint32_t base_disallow_mask;
       (rangeset_is_empty((d)->iomem_caps) &&                     \
        rangeset_is_empty((d)->arch.ioport_caps) &&               \
        !has_arch_pdevs(d) &&                                     \
-      !is_hvm_domain(d)) ?                                      \
+      is_pv_domain(d)) ?                                        \
       L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS))

  static void __init init_frametable_chunk(void *start, void *end)
@@ -433,7 +433,7 @@ int page_is_ram_type(unsigned long mfn, unsigned long mem_type)

  unsigned long domain_get_maximum_gpfn(struct domain *d)
  {
-    if ( is_hvm_domain(d) )
+    if ( has_hvm_container_domain(d) )
          return p2m_get_hostp2m(d)->max_mapped_pfn;
      /* NB. PV guests specify nr_pfns rather than max_pfn so we adjust here. */
      return (arch_get_max_pfn(d) ?: 1) - 1;
@@ -2379,7 +2379,7 @@ static int __get_page_type(struct page_info *page, unsigned long type,
      {
          /* Special pages should not be accessible from devices. */
          struct domain *d = page_get_owner(page);
-        if ( d && !is_hvm_domain(d) && unlikely(need_iommu(d)) )
+        if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
          {
              if ( (x & PGT_type_mask) == PGT_writable_page )
                  iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));

This is one in particular where I couldn't quite figure out what the PV/HVM split was for, so I couldn't work out whether PVH should take this conditional or not.

I think this is probably correct: in common/grant_table.c, one location guarded by need_iommu() contains the following two lines:

        /* Shouldn't happen, because you can't use iommu in a HVM domain. */
        BUG_ON(paging_mode_translate(ld));

Not sure exactly what the comment means...
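
To spell the question out: once PVH becomes a third guest type, the old
!is_hvm_domain() test splits into two non-equivalent conversions (a
sketch; the first is what the patch does):

    /* Option 1 (what the patch does): PVH skips the IOMMU fixup */
    if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
        { /* ... */ }

    /* Option 2: PVH takes the IOMMU fixup along with classic PV */
    if ( d && !is_hvm_domain(d) && unlikely(need_iommu(d)) )
        { /* ... */ }

If the grant-table BUG_ON means what it appears to -- that a
paging_mode_translate() guest never has Xen-maintained IOMMU mappings --
then PVH, which will be translated, should behave like HVM here, i.e.
Option 1.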

 -George


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

