[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[RFC PATCH v1 07/10] arch-x86/vpmu.c: store guest registers when domain_id == DOMID_XEN



When Xen profiling is enabled (for HW domain only), then domain_id is
set to DOMID_XEN, and Xen's IP is reported as the sample location.

With VPMU >= 0.2 we can now report more information to help a guest
construct a stacktrace, and store the guest's registers and domain_id
into the new 'struct xen_pmu_hv_stacktrace'.

Privileged (HW domain) guests can then trace themselves, even if the
sample interrupt triggered inside Xen. This is useful if kernel or
userspace stacktrace gathering is enabled.

For this to be effective a kernel change is required too, but it is
backwards compatible with old kernels, which:
* would ignore the newly stored data (it is towards the end of the page,
in a previously unused area)
* would report VPMU 0.1, and thus Xen would have xen_pmu_hv_stacktrace ==
  NULL, and not report this information

To avoid stale values, the guest_domain_id is always initialized to the
correct value, and ip is set to 0.

Signed-off-by: Edwin Török <edwin.torok@xxxxxxxxx>
---
 xen/arch/x86/cpu/vpmu.c         | 33 ++++++++++++++++++++++++++++-----
 xen/arch/x86/include/asm/vpmu.h |  1 +
 2 files changed, 29 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 286dc2af5f..770f63f95a 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -170,7 +170,7 @@ static inline void vpmu_convert_regs(struct xen_pmu_regs 
*r, uint64_t *flags,
   if (!is_hvm_vcpu(sampled)) {
     r->ss = cur_regs->ss;
     r->cs = cur_regs->cs;
-    if (!(sampled->arch.flags & TF_kernel_mode))
+    if (flags && !(sampled->arch.flags & TF_kernel_mode))
       *flags |= PMU_SAMPLE_USER;
   } else {
     struct segment_register seg;
@@ -180,7 +180,7 @@ static inline void vpmu_convert_regs(struct xen_pmu_regs 
*r, uint64_t *flags,
     hvm_get_segment_register(sampled, x86_seg_ss, &seg);
     r->ss = seg.sel;
     r->cpl = seg.dpl;
-    if (!(sampled->arch.hvm.guest_cr[0] & X86_CR0_PE))
+    if (flags && !(sampled->arch.hvm.guest_cr[0] & X86_CR0_PE))
       *flags |= PMU_SAMPLE_REAL;
   }
 }
@@ -240,6 +240,14 @@ void vpmu_do_interrupt(void)
         else
             domid = sampled->domain->domain_id;
 
+        if (vpmu->xenpmu_hv_stacktrace)
+        {
+            vpmu->xenpmu_hv_stacktrace->guest_domain_id = domid;
+
+            /* avoid stale values when domid != DOMID_XEN */
+            vpmu->xenpmu_hv_stacktrace->guest.r.regs.ip = 0;
+        }
+
         /* Store appropriate registers in xenpmu_data */
 #ifdef CONFIG_COMPAT
         /* FIXME: 32-bit PVH should go here as well */
@@ -275,6 +283,11 @@ void vpmu_do_interrupt(void)
                       is_hardware_domain(sampling->domain) )
             {
                 cur_regs = regs;
+                if (vpmu->xenpmu_hv_stacktrace)
+                {
+                    
vpmu_convert_regs(&vpmu->xenpmu_hv_stacktrace->guest.r.regs,
+                                      NULL, sampled, guest_cpu_user_regs());
+                }
                 domid = DOMID_XEN;
             }
             else
@@ -546,6 +559,7 @@ static void vpmu_cleanup(struct vcpu *v)
     vpmu_arch_destroy(v);
     xenpmu_data = vpmu->xenpmu_data;
     vpmu->xenpmu_data = NULL;
+    vpmu->xenpmu_hv_stacktrace = NULL;
 
     spin_unlock(&vpmu->vpmu_lock);
 
@@ -572,6 +586,7 @@ static int pvpmu_init(struct domain *d, xen_pmu_params_t 
*params)
     struct vpmu_struct *vpmu;
     struct page_info *page;
     uint64_t gfn = params->val;
+    void *vpmu_page;
 
     if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
         return -EINVAL;
@@ -601,7 +616,8 @@ static int pvpmu_init(struct domain *d, xen_pmu_params_t 
*params)
         return -EEXIST;
     }
 
-    v->arch.vpmu.xenpmu_data = __map_domain_page_global(page);
+    vpmu_page = __map_domain_page_global(page);
+    v->arch.vpmu.xenpmu_data = vpmu_page;
     if ( !v->arch.vpmu.xenpmu_data )
     {
         spin_unlock(&vpmu->vpmu_lock);
@@ -609,8 +625,15 @@ static int pvpmu_init(struct domain *d, xen_pmu_params_t 
*params)
         return -ENOMEM;
     }
 
-    if ( vpmu_arch_initialise(v) )
-        put_vpmu(v);
+    if (params->version.maj > 0 || params->version.min >= 2)
+      v->arch.vpmu.xenpmu_hv_stacktrace =
+          (void *)((uint8_t *)vpmu_page + PAGE_SIZE -
+                   sizeof(struct xen_pmu_hv_stacktrace));
+    else
+     v->arch.vpmu.xenpmu_hv_stacktrace = NULL;
+
+    if (vpmu_arch_initialise(v))
+      put_vpmu(v);
 
     spin_unlock(&vpmu->vpmu_lock);
 
diff --git a/xen/arch/x86/include/asm/vpmu.h b/xen/arch/x86/include/asm/vpmu.h
index dae9b43dac..df28f80f0f 100644
--- a/xen/arch/x86/include/asm/vpmu.h
+++ b/xen/arch/x86/include/asm/vpmu.h
@@ -55,6 +55,7 @@ struct vpmu_struct {
     size_t context_size;
     size_t priv_context_size;
     struct xen_pmu_data *xenpmu_data;
+    struct xen_pmu_hv_stacktrace *xenpmu_hv_stacktrace; /* only set if client 
vpmu >= 0.2 */
     spinlock_t vpmu_lock;
 };
 
-- 
2.47.1




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.