[Xen-devel] [PATCH v8 02/11] viridian: separately allocate domain and vcpu structures


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Paul Durrant <paul.durrant@xxxxxxxxxx>
  • Date: Mon, 18 Mar 2019 11:20:50 +0000
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Paul Durrant <paul.durrant@xxxxxxxxxx>, Wei Liu <wei.liu2@xxxxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Mon, 18 Mar 2019 11:21:15 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Currently the viridian_domain and viridian_vcpu structures are inline in
the hvm_domain and hvm_vcpu structures respectively. Subsequent patches
will need to add sizable extra fields to the viridian structures, which
will cause the PAGE_SIZE limit of the overall vcpu structure to be
exceeded. This patch, therefore, uses the new init hooks to separately
allocate the structures and converts the 'viridian' fields in hvm_domain
and hvm_vcpu to be pointers to these allocations. These separate allocations
also allow some vcpu and domain pointers to become const.
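
In outline, the per-vcpu case looks as follows (a simplified sketch of the
viridian_vcpu_init()/viridian_vcpu_deinit() hunks further down; the real
deinit also tears down the VP assist page mapping before freeing):

    int viridian_vcpu_init(struct vcpu *v)
    {
        ASSERT(!v->arch.hvm.viridian);
        v->arch.hvm.viridian = xzalloc(struct viridian_vcpu); /* zeroed */
        if ( !v->arch.hvm.viridian )
            return -ENOMEM;

        return 0;
    }

    void viridian_vcpu_deinit(struct vcpu *v)
    {
        /* Tolerate being called when init never ran or failed. */
        if ( !v->arch.hvm.viridian )
            return;

        XFREE(v->arch.hvm.viridian); /* frees and NULLs the pointer */
    }

The domain-level hooks follow the same pattern with struct viridian_domain.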

Ideally, now that they are no longer inline, the allocations of the
viridian structures could be made conditional on whether the toolstack
is going to configure the viridian enlightenments. However, the toolstack
is currently unable to convey this information to the domain creation
code, so such an enhancement is deferred until that becomes possible.

NOTE: The patch also introduces the 'is_viridian_vcpu' macro to avoid
      adding another open-coded 'is_viridian_domain(v->domain)' check
      (illustrated below). The macro will see further use in a
      subsequent patch.
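
For reference, the macro (from the hvm.h hunk below) simply wraps the
existing domain check so call sites can test the vcpu directly:

    #define is_viridian_vcpu(v) \
        is_viridian_domain((v)->domain)

    /* e.g. in viridian_save_vcpu_ctxt(): */
    if ( !is_viridian_vcpu(v) )    /* was: !is_viridian_domain(v->domain) */
        return 0;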

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>

v4:
 - Const-ify some vcpu and domain pointers

v2:
 - use XFREE()
 - expand commit comment to point out why allocations are unconditional
---
 xen/arch/x86/hvm/viridian/private.h  |  2 +-
 xen/arch/x86/hvm/viridian/synic.c    | 46 ++++++++---------
 xen/arch/x86/hvm/viridian/time.c     | 38 +++++++-------
 xen/arch/x86/hvm/viridian/viridian.c | 75 ++++++++++++++++++----------
 xen/include/asm-x86/hvm/domain.h     |  2 +-
 xen/include/asm-x86/hvm/hvm.h        |  4 ++
 xen/include/asm-x86/hvm/vcpu.h       |  2 +-
 xen/include/asm-x86/hvm/viridian.h   | 10 ++--
 8 files changed, 101 insertions(+), 78 deletions(-)

diff --git a/xen/arch/x86/hvm/viridian/private.h b/xen/arch/x86/hvm/viridian/private.h
index 398b22f12d..46174f48cd 100644
--- a/xen/arch/x86/hvm/viridian/private.h
+++ b/xen/arch/x86/hvm/viridian/private.h
@@ -89,7 +89,7 @@ void viridian_time_load_domain_ctxt(
 
 void viridian_dump_guest_page(const struct vcpu *v, const char *name,
                               const struct viridian_page *vp);
-void viridian_map_guest_page(struct vcpu *v, struct viridian_page *vp);
+void viridian_map_guest_page(const struct vcpu *v, struct viridian_page *vp);
 void viridian_unmap_guest_page(struct viridian_page *vp);
 
 #endif /* X86_HVM_VIRIDIAN_PRIVATE_H */
diff --git a/xen/arch/x86/hvm/viridian/synic.c b/xen/arch/x86/hvm/viridian/synic.c
index a6ebbbc9f5..28eda7798c 100644
--- a/xen/arch/x86/hvm/viridian/synic.c
+++ b/xen/arch/x86/hvm/viridian/synic.c
@@ -28,9 +28,9 @@ typedef union _HV_VP_ASSIST_PAGE
     uint8_t ReservedZBytePadding[PAGE_SIZE];
 } HV_VP_ASSIST_PAGE;
 
-void viridian_apic_assist_set(struct vcpu *v)
+void viridian_apic_assist_set(const struct vcpu *v)
 {
-    HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian.vp_assist.ptr;
+    HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian->vp_assist.ptr;
 
     if ( !ptr )
         return;
@@ -40,40 +40,40 @@ void viridian_apic_assist_set(struct vcpu *v)
      * wrong and the VM will most likely hang so force a crash now
      * to make the problem clear.
      */
-    if ( v->arch.hvm.viridian.apic_assist_pending )
+    if ( v->arch.hvm.viridian->apic_assist_pending )
         domain_crash(v->domain);
 
-    v->arch.hvm.viridian.apic_assist_pending = true;
+    v->arch.hvm.viridian->apic_assist_pending = true;
     ptr->ApicAssist.no_eoi = 1;
 }
 
-bool viridian_apic_assist_completed(struct vcpu *v)
+bool viridian_apic_assist_completed(const struct vcpu *v)
 {
-    HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian.vp_assist.ptr;
+    HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian->vp_assist.ptr;
 
     if ( !ptr )
         return false;
 
-    if ( v->arch.hvm.viridian.apic_assist_pending &&
+    if ( v->arch.hvm.viridian->apic_assist_pending &&
          !ptr->ApicAssist.no_eoi )
     {
         /* An EOI has been avoided */
-        v->arch.hvm.viridian.apic_assist_pending = false;
+        v->arch.hvm.viridian->apic_assist_pending = false;
         return true;
     }
 
     return false;
 }
 
-void viridian_apic_assist_clear(struct vcpu *v)
+void viridian_apic_assist_clear(const struct vcpu *v)
 {
-    HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian.vp_assist.ptr;
+    HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian->vp_assist.ptr;
 
     if ( !ptr )
         return;
 
     ptr->ApicAssist.no_eoi = 0;
-    v->arch.hvm.viridian.apic_assist_pending = false;
+    v->arch.hvm.viridian->apic_assist_pending = false;
 }
 
 int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
@@ -95,12 +95,12 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
 
     case HV_X64_MSR_VP_ASSIST_PAGE:
         /* release any previous mapping */
-        viridian_unmap_guest_page(&v->arch.hvm.viridian.vp_assist);
-        v->arch.hvm.viridian.vp_assist.msr.raw = val;
+        viridian_unmap_guest_page(&v->arch.hvm.viridian->vp_assist);
+        v->arch.hvm.viridian->vp_assist.msr.raw = val;
         viridian_dump_guest_page(v, "VP_ASSIST",
-                                 &v->arch.hvm.viridian.vp_assist);
-        if ( v->arch.hvm.viridian.vp_assist.msr.fields.enabled )
-            viridian_map_guest_page(v, &v->arch.hvm.viridian.vp_assist);
+                                 &v->arch.hvm.viridian->vp_assist);
+        if ( v->arch.hvm.viridian->vp_assist.msr.fields.enabled )
+            viridian_map_guest_page(v, &v->arch.hvm.viridian->vp_assist);
         break;
 
     default:
@@ -132,7 +132,7 @@ int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
         break;
 
     case HV_X64_MSR_VP_ASSIST_PAGE:
-        *val = v->arch.hvm.viridian.vp_assist.msr.raw;
+        *val = v->arch.hvm.viridian->vp_assist.msr.raw;
         break;
 
     default:
@@ -146,18 +146,18 @@ int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
 void viridian_synic_save_vcpu_ctxt(const struct vcpu *v,
                                    struct hvm_viridian_vcpu_context *ctxt)
 {
-    ctxt->apic_assist_pending = v->arch.hvm.viridian.apic_assist_pending;
-    ctxt->vp_assist_msr = v->arch.hvm.viridian.vp_assist.msr.raw;
+    ctxt->apic_assist_pending = v->arch.hvm.viridian->apic_assist_pending;
+    ctxt->vp_assist_msr = v->arch.hvm.viridian->vp_assist.msr.raw;
 }
 
 void viridian_synic_load_vcpu_ctxt(
     struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt)
 {
-    v->arch.hvm.viridian.vp_assist.msr.raw = ctxt->vp_assist_msr;
-    if ( v->arch.hvm.viridian.vp_assist.msr.fields.enabled )
-        viridian_map_guest_page(v, &v->arch.hvm.viridian.vp_assist);
+    v->arch.hvm.viridian->vp_assist.msr.raw = ctxt->vp_assist_msr;
+    if ( v->arch.hvm.viridian->vp_assist.msr.fields.enabled )
+        viridian_map_guest_page(v, &v->arch.hvm.viridian->vp_assist);
 
-    v->arch.hvm.viridian.apic_assist_pending = ctxt->apic_assist_pending;
+    v->arch.hvm.viridian->apic_assist_pending = ctxt->apic_assist_pending;
 }
 
 /*
diff --git a/xen/arch/x86/hvm/viridian/time.c b/xen/arch/x86/hvm/viridian/time.c
index 840a82b457..a7e94aadf0 100644
--- a/xen/arch/x86/hvm/viridian/time.c
+++ b/xen/arch/x86/hvm/viridian/time.c
@@ -27,7 +27,7 @@ typedef struct _HV_REFERENCE_TSC_PAGE
 
 static void dump_reference_tsc(const struct domain *d)
 {
-    const union viridian_page_msr *rt = &d->arch.hvm.viridian.reference_tsc;
+    const union viridian_page_msr *rt = &d->arch.hvm.viridian->reference_tsc;
 
     if ( !rt->fields.enabled )
         return;
@@ -38,7 +38,7 @@ static void dump_reference_tsc(const struct domain *d)
 
 static void update_reference_tsc(struct domain *d, bool initialize)
 {
-    unsigned long gmfn = d->arch.hvm.viridian.reference_tsc.fields.pfn;
+    unsigned long gmfn = d->arch.hvm.viridian->reference_tsc.fields.pfn;
     struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
     HV_REFERENCE_TSC_PAGE *p;
 
@@ -107,7 +107,7 @@ static void update_reference_tsc(struct domain *d, bool initialize)
     put_page_and_type(page);
 }
 
-static int64_t raw_trc_val(struct domain *d)
+static int64_t raw_trc_val(const struct domain *d)
 {
     uint64_t tsc;
     struct time_scale tsc_to_ns;
@@ -119,21 +119,19 @@ static int64_t raw_trc_val(struct domain *d)
     return scale_delta(tsc, &tsc_to_ns) / 100ul;
 }
 
-void viridian_time_ref_count_freeze(struct domain *d)
+void viridian_time_ref_count_freeze(const struct domain *d)
 {
-    struct viridian_time_ref_count *trc;
-
-    trc = &d->arch.hvm.viridian.time_ref_count;
+    struct viridian_time_ref_count *trc =
+        &d->arch.hvm.viridian->time_ref_count;
 
     if ( test_and_clear_bit(_TRC_running, &trc->flags) )
         trc->val = raw_trc_val(d) + trc->off;
 }
 
-void viridian_time_ref_count_thaw(struct domain *d)
+void viridian_time_ref_count_thaw(const struct domain *d)
 {
-    struct viridian_time_ref_count *trc;
-
-    trc = &d->arch.hvm.viridian.time_ref_count;
+    struct viridian_time_ref_count *trc =
+        &d->arch.hvm.viridian->time_ref_count;
 
     if ( !d->is_shutting_down &&
          !test_and_set_bit(_TRC_running, &trc->flags) )
@@ -150,9 +148,9 @@ int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
         if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
             return X86EMUL_EXCEPTION;
 
-        d->arch.hvm.viridian.reference_tsc.raw = val;
+        d->arch.hvm.viridian->reference_tsc.raw = val;
         dump_reference_tsc(d);
-        if ( d->arch.hvm.viridian.reference_tsc.fields.enabled )
+        if ( d->arch.hvm.viridian->reference_tsc.fields.enabled )
             update_reference_tsc(d, true);
         break;
 
@@ -189,13 +187,13 @@ int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
         if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
             return X86EMUL_EXCEPTION;
 
-        *val = d->arch.hvm.viridian.reference_tsc.raw;
+        *val = d->arch.hvm.viridian->reference_tsc.raw;
         break;
 
     case HV_X64_MSR_TIME_REF_COUNT:
     {
         struct viridian_time_ref_count *trc =
-            &d->arch.hvm.viridian.time_ref_count;
+            &d->arch.hvm.viridian->time_ref_count;
 
         if ( !(viridian_feature_mask(d) & HVMPV_time_ref_count) )
             return X86EMUL_EXCEPTION;
@@ -219,17 +217,17 @@ int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
 void viridian_time_save_domain_ctxt(
     const struct domain *d, struct hvm_viridian_domain_context *ctxt)
 {
-    ctxt->time_ref_count = d->arch.hvm.viridian.time_ref_count.val;
-    ctxt->reference_tsc = d->arch.hvm.viridian.reference_tsc.raw;
+    ctxt->time_ref_count = d->arch.hvm.viridian->time_ref_count.val;
+    ctxt->reference_tsc = d->arch.hvm.viridian->reference_tsc.raw;
 }
 
 void viridian_time_load_domain_ctxt(
     struct domain *d, const struct hvm_viridian_domain_context *ctxt)
 {
-    d->arch.hvm.viridian.time_ref_count.val = ctxt->time_ref_count;
-    d->arch.hvm.viridian.reference_tsc.raw = ctxt->reference_tsc;
+    d->arch.hvm.viridian->time_ref_count.val = ctxt->time_ref_count;
+    d->arch.hvm.viridian->reference_tsc.raw = ctxt->reference_tsc;
 
-    if ( d->arch.hvm.viridian.reference_tsc.fields.enabled )
+    if ( d->arch.hvm.viridian->reference_tsc.fields.enabled )
         update_reference_tsc(d, false);
 }
 
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 5b0eb8a8c7..7839718ef4 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -146,7 +146,7 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
          * Hypervisor information, but only if the guest has set its
          * own version number.
          */
-        if ( d->arch.hvm.viridian.guest_os_id.raw == 0 )
+        if ( d->arch.hvm.viridian->guest_os_id.raw == 0 )
             break;
         res->a = viridian_build;
         res->b = ((uint32_t)viridian_major << 16) | viridian_minor;
@@ -191,8 +191,8 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
 
     case 4:
         /* Recommended hypercall usage. */
-        if ( (d->arch.hvm.viridian.guest_os_id.raw == 0) ||
-             (d->arch.hvm.viridian.guest_os_id.fields.os < 4) )
+        if ( (d->arch.hvm.viridian->guest_os_id.raw == 0) ||
+             (d->arch.hvm.viridian->guest_os_id.fields.os < 4) )
             break;
         res->a = CPUID4A_RELAX_TIMER_INT;
         if ( viridian_feature_mask(d) & HVMPV_hcall_remote_tlb_flush )
@@ -224,7 +224,7 @@ static void dump_guest_os_id(const struct domain *d)
 {
     const union viridian_guest_os_id_msr *goi;
 
-    goi = &d->arch.hvm.viridian.guest_os_id;
+    goi = &d->arch.hvm.viridian->guest_os_id;
 
     printk(XENLOG_G_INFO
            "d%d: VIRIDIAN GUEST_OS_ID: vendor: %x os: %x major: %x minor: %x 
sp: %x build: %x\n",
@@ -238,7 +238,7 @@ static void dump_hypercall(const struct domain *d)
 {
     const union viridian_page_msr *hg;
 
-    hg = &d->arch.hvm.viridian.hypercall_gpa;
+    hg = &d->arch.hvm.viridian->hypercall_gpa;
 
     printk(XENLOG_G_INFO "d%d: VIRIDIAN HYPERCALL: enabled: %x pfn: %lx\n",
            d->domain_id,
@@ -247,7 +247,7 @@ static void dump_hypercall(const struct domain *d)
 
 static void enable_hypercall_page(struct domain *d)
 {
-    unsigned long gmfn = d->arch.hvm.viridian.hypercall_gpa.fields.pfn;
+    unsigned long gmfn = d->arch.hvm.viridian->hypercall_gpa.fields.pfn;
     struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
     uint8_t *p;
 
@@ -288,14 +288,14 @@ int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val)
     switch ( idx )
     {
     case HV_X64_MSR_GUEST_OS_ID:
-        d->arch.hvm.viridian.guest_os_id.raw = val;
+        d->arch.hvm.viridian->guest_os_id.raw = val;
         dump_guest_os_id(d);
         break;
 
     case HV_X64_MSR_HYPERCALL:
-        d->arch.hvm.viridian.hypercall_gpa.raw = val;
+        d->arch.hvm.viridian->hypercall_gpa.raw = val;
         dump_hypercall(d);
-        if ( d->arch.hvm.viridian.hypercall_gpa.fields.enabled )
+        if ( d->arch.hvm.viridian->hypercall_gpa.fields.enabled )
             enable_hypercall_page(d);
         break;
 
@@ -317,10 +317,10 @@ int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val)
     case HV_X64_MSR_CRASH_P3:
     case HV_X64_MSR_CRASH_P4:
         BUILD_BUG_ON(HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 >=
-                     ARRAY_SIZE(v->arch.hvm.viridian.crash_param));
+                     ARRAY_SIZE(v->arch.hvm.viridian->crash_param));
 
         idx -= HV_X64_MSR_CRASH_P0;
-        v->arch.hvm.viridian.crash_param[idx] = val;
+        v->arch.hvm.viridian->crash_param[idx] = val;
         break;
 
     case HV_X64_MSR_CRASH_CTL:
@@ -337,11 +337,11 @@ int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val)
         spin_unlock(&d->shutdown_lock);
 
         gprintk(XENLOG_WARNING, "VIRIDIAN CRASH: %lx %lx %lx %lx %lx\n",
-                v->arch.hvm.viridian.crash_param[0],
-                v->arch.hvm.viridian.crash_param[1],
-                v->arch.hvm.viridian.crash_param[2],
-                v->arch.hvm.viridian.crash_param[3],
-                v->arch.hvm.viridian.crash_param[4]);
+                v->arch.hvm.viridian->crash_param[0],
+                v->arch.hvm.viridian->crash_param[1],
+                v->arch.hvm.viridian->crash_param[2],
+                v->arch.hvm.viridian->crash_param[3],
+                v->arch.hvm.viridian->crash_param[4]);
         break;
     }
 
@@ -364,11 +364,11 @@ int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val)
     switch ( idx )
     {
     case HV_X64_MSR_GUEST_OS_ID:
-        *val = d->arch.hvm.viridian.guest_os_id.raw;
+        *val = d->arch.hvm.viridian->guest_os_id.raw;
         break;
 
     case HV_X64_MSR_HYPERCALL:
-        *val = d->arch.hvm.viridian.hypercall_gpa.raw;
+        *val = d->arch.hvm.viridian->hypercall_gpa.raw;
         break;
 
     case HV_X64_MSR_VP_INDEX:
@@ -393,10 +393,10 @@ int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val)
     case HV_X64_MSR_CRASH_P3:
     case HV_X64_MSR_CRASH_P4:
         BUILD_BUG_ON(HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 >=
-                     ARRAY_SIZE(v->arch.hvm.viridian.crash_param));
+                     ARRAY_SIZE(v->arch.hvm.viridian->crash_param));
 
         idx -= HV_X64_MSR_CRASH_P0;
-        *val = v->arch.hvm.viridian.crash_param[idx];
+        *val = v->arch.hvm.viridian->crash_param[idx];
         break;
 
     case HV_X64_MSR_CRASH_CTL:
@@ -419,17 +419,33 @@ int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val)
 
 int viridian_vcpu_init(struct vcpu *v)
 {
+    ASSERT(!v->arch.hvm.viridian);
+    v->arch.hvm.viridian = xzalloc(struct viridian_vcpu);
+    if ( !v->arch.hvm.viridian )
+        return -ENOMEM;
+
     return 0;
 }
 
 int viridian_domain_init(struct domain *d)
 {
+    ASSERT(!d->arch.hvm.viridian);
+    d->arch.hvm.viridian = xzalloc(struct viridian_domain);
+    if ( !d->arch.hvm.viridian )
+        return -ENOMEM;
+
     return 0;
 }
 
 void viridian_vcpu_deinit(struct vcpu *v)
 {
-    viridian_synic_wrmsr(v, HV_X64_MSR_VP_ASSIST_PAGE, 0);
+    if ( !v->arch.hvm.viridian )
+        return;
+
+    if ( is_viridian_vcpu(v) )
+        viridian_synic_wrmsr(v, HV_X64_MSR_VP_ASSIST_PAGE, 0);
+
+    XFREE(v->arch.hvm.viridian);
 }
 
 void viridian_domain_deinit(struct domain *d)
@@ -438,6 +454,11 @@ void viridian_domain_deinit(struct domain *d)
 
     for_each_vcpu ( d, v )
         viridian_vcpu_deinit(v);
+
+    if ( !d->arch.hvm.viridian )
+        return;
+
+    XFREE(d->arch.hvm.viridian);
 }
 
 /*
@@ -591,7 +612,7 @@ void viridian_dump_guest_page(const struct vcpu *v, const char *name,
            v, name, (unsigned long)vp->msr.fields.pfn);
 }
 
-void viridian_map_guest_page(struct vcpu *v, struct viridian_page *vp)
+void viridian_map_guest_page(const struct vcpu *v, struct viridian_page *vp)
 {
     struct domain *d = v->domain;
     unsigned long gmfn = vp->msr.fields.pfn;
@@ -645,8 +666,8 @@ static int viridian_save_domain_ctxt(struct vcpu *v,
 {
     const struct domain *d = v->domain;
     struct hvm_viridian_domain_context ctxt = {
-        .hypercall_gpa  = d->arch.hvm.viridian.hypercall_gpa.raw,
-        .guest_os_id    = d->arch.hvm.viridian.guest_os_id.raw,
+        .hypercall_gpa = d->arch.hvm.viridian->hypercall_gpa.raw,
+        .guest_os_id = d->arch.hvm.viridian->guest_os_id.raw,
     };
 
     if ( !is_viridian_domain(d) )
@@ -665,8 +686,8 @@ static int viridian_load_domain_ctxt(struct domain *d,
     if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
         return -EINVAL;
 
-    d->arch.hvm.viridian.hypercall_gpa.raw  = ctxt.hypercall_gpa;
-    d->arch.hvm.viridian.guest_os_id.raw    = ctxt.guest_os_id;
+    d->arch.hvm.viridian->hypercall_gpa.raw = ctxt.hypercall_gpa;
+    d->arch.hvm.viridian->guest_os_id.raw = ctxt.guest_os_id;
 
     viridian_time_load_domain_ctxt(d, &ctxt);
 
@@ -680,7 +701,7 @@ static int viridian_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct hvm_viridian_vcpu_context ctxt = {};
 
-    if ( !is_viridian_domain(v->domain) )
+    if ( !is_viridian_vcpu(v) )
         return 0;
 
     viridian_synic_save_vcpu_ctxt(v, &ctxt);
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 3e7331817f..6c7c4f5aa6 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -154,7 +154,7 @@ struct hvm_domain {
     /* hypervisor intercepted msix table */
     struct list_head       msixtbl_list;
 
-    struct viridian_domain viridian;
+    struct viridian_domain *viridian;
 
     bool_t                 hap_enabled;
     bool_t                 mem_sharing_enabled;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 53ffebb2c5..37c3567a57 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -463,6 +463,9 @@ static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
 #define is_viridian_domain(d) \
     (is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))
 
+#define is_viridian_vcpu(v) \
+    is_viridian_domain((v)->domain)
+
 #define has_viridian_time_ref_count(d) \
     (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_time_ref_count))
 
@@ -762,6 +765,7 @@ static inline bool hvm_has_set_descriptor_access_exiting(void)
 }
 
 #define is_viridian_domain(d) ((void)(d), false)
+#define is_viridian_vcpu(v) ((void)(v), false)
 #define has_viridian_time_ref_count(d) ((void)(d), false)
 #define hvm_long_mode_active(v) ((void)(v), false)
 #define hvm_get_guest_time(v) ((void)(v), 0)
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 6c84d5a5a6..d1589f3a96 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -205,7 +205,7 @@ struct hvm_vcpu {
     /* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
     struct x86_event     inject_event;
 
-    struct viridian_vcpu viridian;
+    struct viridian_vcpu *viridian;
 };
 
 #endif /* __ASM_X86_HVM_VCPU_H__ */
diff --git a/xen/include/asm-x86/hvm/viridian.h b/xen/include/asm-x86/hvm/viridian.h
index f072838955..c562424332 100644
--- a/xen/include/asm-x86/hvm/viridian.h
+++ b/xen/include/asm-x86/hvm/viridian.h
@@ -77,8 +77,8 @@ int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val);
 int
 viridian_hypercall(struct cpu_user_regs *regs);
 
-void viridian_time_ref_count_freeze(struct domain *d);
-void viridian_time_ref_count_thaw(struct domain *d);
+void viridian_time_ref_count_freeze(const struct domain *d);
+void viridian_time_ref_count_thaw(const struct domain *d);
 
 int viridian_vcpu_init(struct vcpu *v);
 int viridian_domain_init(struct domain *d);
@@ -86,9 +86,9 @@ int viridian_domain_init(struct domain *d);
 void viridian_vcpu_deinit(struct vcpu *v);
 void viridian_domain_deinit(struct domain *d);
 
-void viridian_apic_assist_set(struct vcpu *v);
-bool viridian_apic_assist_completed(struct vcpu *v);
-void viridian_apic_assist_clear(struct vcpu *v);
+void viridian_apic_assist_set(const struct vcpu *v);
+bool viridian_apic_assist_completed(const struct vcpu *v);
+void viridian_apic_assist_clear(const struct vcpu *v);
 
 #endif /* __ASM_X86_HVM_VIRIDIAN_H__ */
 
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

