
[Xen-devel] [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through



The write-discard property of the type can't be represented in IOMMU
page table entries. Make sure the respective checks / tracking can't
race, by utilizing the domain lock. The other sides of the sharing/
paging/log-dirty exclusion checks should perhaps subsequently be put
under that lock as well.

Take the opportunity to also convert the neighboring bool_t fields in
struct hvm_domain to bool.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Don't set p2m_ram_ro_used when failing the request.
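For illustration only (not part of the patch): the exclusion works by
taking a single lock around "check the other side, then record own use"
on both paths. Below is a minimal sketch of that pattern, with
hypothetical names (feature_lock, a_used, b_used) and assuming
Xen-style spinlock primitives; the patch itself uses domain_lock() for
this purpose.

    /* Both features are mutually exclusive; one lock guards both flags. */
    static DEFINE_SPINLOCK(feature_lock);
    static bool a_used, b_used;

    static int enable_a(void)
    {
        int rc = 0;

        spin_lock(&feature_lock);
        if ( b_used )
            rc = -EXDEV;      /* the other side is already in use */
        else
            a_used = true;    /* record use only when succeeding */
        spin_unlock(&feature_lock);

        return rc;
    }

    static int enable_b(void)
    {
        int rc = 0;

        spin_lock(&feature_lock);
        if ( a_used )
            rc = -EXDEV;
        else
            b_used = true;
        spin_unlock(&feature_lock);

        return rc;
    }

Without the common lock, enable_a() could observe !b_used while
enable_b() concurrently observes !a_used, leaving both features active
at the same time - exactly the race the patch closes between
set_mem_type() and assign_device().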

--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -255,16 +255,33 @@ static int set_mem_type(struct domain *d
  
      mem_type = array_index_nospec(data->mem_type, ARRAY_SIZE(memtype));
  
-    if ( mem_type == HVMMEM_ioreq_server )
+    switch ( mem_type )
      {
          unsigned int flags;
  
+    case HVMMEM_ioreq_server:
          if ( !hap_enabled(d) )
              return -EOPNOTSUPP;
  
          /* Do not change to HVMMEM_ioreq_server if no ioreq server mapped. */
          if ( !p2m_get_ioreq_server(d, &flags) )
              return -EINVAL;
+
+        break;
+
+    case HVMMEM_ram_ro:
+        /* p2m_ram_ro can't be represented in IOMMU mappings. */
+        domain_lock(d);
+        if ( has_iommu_pt(d) )
+            rc = -EXDEV;
+        else
+            d->arch.hvm.p2m_ram_ro_used = true;
+        domain_unlock(d);
+
+        if ( rc )
+            return rc;
+
+        break;
      }
  
      while ( iter < data->nr )
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1448,17 +1448,36 @@ static int assign_device(struct domain *
      if ( !iommu_enabled || !hd->platform_ops )
          return 0;
  
-    /* Prevent device assign if mem paging or mem sharing have been
-     * enabled for this domain */
-    if ( unlikely(d->arch.hvm.mem_sharing_enabled ||
-                  vm_event_check_ring(d->vm_event_paging) ||
+    domain_lock(d);
+
+    /*
+     * Prevent device assignment if any of
+     * - mem paging
+     * - mem sharing
+     * - the p2m_ram_ro type
+     * - global log-dirty mode
+     * are in use by this domain.
+     */
+    if ( unlikely(vm_event_check_ring(d->vm_event_paging) ||
+#ifdef CONFIG_HVM
+                  (is_hvm_domain(d) &&
+                   (d->arch.hvm.mem_sharing_enabled ||
+                    d->arch.hvm.p2m_ram_ro_used)) ||
+#endif
                    p2m_get_hostp2m(d)->global_logdirty) )
+    {
+        domain_unlock(d);
          return -EXDEV;
+    }
  
      if ( !pcidevs_trylock() )
+    {
+        domain_unlock(d);
          return -ERESTART;
+    }
  
      rc = iommu_construct(d);
+    domain_unlock(d);
      if ( rc )
      {
          pcidevs_unlock();
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -156,10 +156,11 @@ struct hvm_domain {
  
      struct viridian_domain *viridian;
  
-    bool_t                 hap_enabled;
-    bool_t                 mem_sharing_enabled;
-    bool_t                 qemu_mapcache_invalidate;
-    bool_t                 is_s3_suspended;
+    bool                   hap_enabled;
+    bool                   mem_sharing_enabled;
+    bool                   p2m_ram_ro_used;
+    bool                   qemu_mapcache_invalidate;
+    bool                   is_s3_suspended;
  
      /*
       * TSC value that VCPUs use to calculate their tsc_offset value.