
[Xen-devel] [PATCH 4 of 4] Add tracing to mem_event and p2m_mem_paging functions


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Olaf Hering <olaf@xxxxxxxxx>
  • Date: Thu, 01 Dec 2011 12:09:20 +0100
  • Delivery-date: Thu, 01 Dec 2011 12:22:43 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1322737677 -3600
# Node ID 0bf827a48a3c9efd43fe04c291b553d2bec46f8b
# Parent  c09ac3717a025a8ead44bbc795fedda715d134c7
Add tracing to mem_event and p2m_mem_paging functions

Maintaining these trace_var calls out-of-tree has become a burden and makes
debugging paging-related issues harder. Putting them into the tree will ease
debugging with xenalyze.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
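
For reference, these records are plain packed structs, so they can also be
decoded outside of xenalyze. Below is a minimal, illustrative decoder for the
trc_mem_p2m_paging payload added in public/trace.h; the struct layout is
copied from the patch, while dump_paging_record() and its harness are
hypothetical helpers, not part of any existing tool:

    /* Illustrative standalone decoder for one trc_mem_p2m_paging record.
     * Assumes the payload words have already been pulled out of the trace
     * buffer, right after the generic record header. */
    #include <stdio.h>
    #include <string.h>

    struct trc_mem_p2m_paging {
        unsigned int d:16, v:16;
        unsigned int td:16, p2mt:5, reason:5, flag_evict_fail:1, flag_drop_page:1;
        unsigned int gfn;
        unsigned int mfn;
    } __attribute__((packed));

    static void dump_paging_record(const void *payload)
    {
        struct trc_mem_p2m_paging t;

        /* memcpy into the packed struct, then let the compiler pick the
         * bitfields apart. */
        memcpy(&t, payload, sizeof(t));
        printf("vcpu %u.%u target dom %u gfn %x mfn %x p2mt %u reason %u%s%s\n",
               t.d, t.v, t.td, t.gfn, t.mfn, t.p2mt, t.reason,
               t.flag_evict_fail ? " evict-failed" : "",
               t.flag_drop_page ? " drop-page" : "");
    }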

diff -r c09ac3717a02 -r 0bf827a48a3c xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -24,6 +24,7 @@
 #include <asm/domain.h>
 #include <xen/event.h>
 #include <xen/wait.h>
+#include <xen/trace.h>
 #include <asm/p2m.h>
 #include <asm/mem_event.h>
 #include <asm/mem_paging.h>
@@ -149,6 +150,13 @@ static int _mem_event_put_request(struct
     mem_event_front_ring_t *front_ring;
     int free_requests;
     RING_IDX req_prod;
+    struct trc_mem_event_put_request T = { .td = d->domain_id };
+    int ret = 0;
+
+    T.d = current->domain->domain_id;
+    T.v = current->vcpu_id;
+    T.ring_bit = med->bit;
+    T.gfn = req->gfn;
 
     mem_event_ring_lock(med);
 
@@ -156,7 +164,7 @@ static int _mem_event_put_request(struct
     /* Requests from foreign domain were claimed in mem_event_check_ring() */
     if ((current->domain == d && free_requests < med->foreign_producers) || !free_requests) {
         mem_event_ring_unlock(med);
-        return 0;
+        goto out;
     }
 
     front_ring = &med->front_ring;
@@ -176,7 +184,15 @@ static int _mem_event_put_request(struct
 
     notify_via_xen_event_channel(d, med->xen_port);
 
-    return 1;
+    ret = 1;
+
+out:
+    T.ret = ret;
+    T.room = free_requests;
+    T.foreign_producers = med->foreign_producers + ret;
+    trace_var(TRC_MEM_EVENT_PUT_REQUEST, 1, sizeof(T), &T);
+
+    return ret;
 }
 
 void mem_event_put_request(struct domain *d, struct mem_event_domain *med,
diff -r c09ac3717a02 -r 0bf827a48a3c xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -31,6 +31,7 @@
 #include <asm/hvm/vmx/vmx.h> /* ept_p2m_init() */
 #include <xen/iommu.h>
 #include <xen/wait.h>
+#include <xen/trace.h>
 #include <asm/mem_event.h>
 #include <public/mem_event.h>
 #include <asm/mem_sharing.h>
@@ -215,7 +216,12 @@ static struct p2m_mem_paging_queue *p2m_
 {
     struct p2m_mem_paging_queue_head *h;
     struct p2m_mem_paging_queue *q, *q_match, *q_free;
+    struct trc_mem_p2m_paging_queue T = { .td = d->domain_id };
     
+    T.d = current->domain->domain_id;
+    T.v = current->vcpu_id;
+    T.gfn = gfn;
+
     h = d->arch.hvm_domain.gfn_queue;
     q_match = q_free = NULL;
 
@@ -238,19 +244,37 @@ static struct p2m_mem_paging_queue *p2m_
             printk("wq woken for gfn %u:%u %lx %u %u %u\n", 
current->domain->domain_id, current->vcpu_id, gfn, q_match->index, 
q_match->woken, q_match->waiters);
         q_match->waiters++;
         q_match->gfn = gfn;
+
+        T.waiters = q_match->waiters;
+        T.woken = q_match->woken;
+        T.index = q_match->index;
     }
 
+    trace_var(TRC_MEM_P2M_PAGING_GET_QUEUE, 1, sizeof(T), &T);
+
     if (!q_match)
         printk("No wq_get for gfn %u:%u %lx\n", current->domain->domain_id, 
current->vcpu_id, gfn);
 
+
     spin_unlock(&d->arch.hvm_domain.gfn_lock);
     return q_match;
 }
 
 static void p2m_mem_paging_put_queue(struct domain *d, struct p2m_mem_paging_queue *q_match)
 {
+    struct trc_mem_p2m_paging_queue T = { .td = d->domain_id };
+
     spin_lock(&d->arch.hvm_domain.gfn_lock);
 
+    /* Snapshot the queue state under the lock before tracing it. */
+    T.d = current->domain->domain_id;
+    T.v = current->vcpu_id;
+    T.gfn = q_match->gfn;
+    T.waiters = q_match->waiters;
+    T.woken = q_match->woken;
+    T.index = q_match->index;
+    trace_var(TRC_MEM_P2M_PAGING_PUT_QUEUE, 1, sizeof(T), &T);
+
     if (q_match->waiters == 0)
         printk("wq_put no waiters, gfn %u:%u %lx %u\n", 
current->domain->domain_id, current->vcpu_id, q_match->gfn, q_match->woken);
     else if (--q_match->waiters == 0)
@@ -263,6 +287,11 @@ static void p2m_mem_paging_wake_queue(st
 {
     struct p2m_mem_paging_queue_head *h;
     struct p2m_mem_paging_queue *q, *q_match = NULL;
+    struct trc_mem_p2m_paging_queue T = { .td = d->domain_id };
+
+    T.d = current->domain->domain_id;
+    T.v = current->vcpu_id;
+    T.gfn = gfn;
 
     spin_lock(&d->arch.hvm_domain.gfn_lock);
 
@@ -277,8 +306,15 @@ static void p2m_mem_paging_wake_queue(st
         if (q_match->woken || q_match->waiters == 0)
             printk("Wrong wake for gfn %u:%u %p %lx %u %u\n", 
current->domain->domain_id, current->vcpu_id, q_match, gfn, q_match->woken, 
q_match->waiters);
         q_match->woken++;
+
+        T.waiters = q_match->waiters;
+        T.woken = q_match->woken;
+        T.index = q_match->index;
+
         wake_up_all(&q_match->wq);
     }
+    trace_var(TRC_MEM_P2M_PAGING_WAKE_QUEUE, 1, sizeof(T), &T);
+
     spin_unlock(&d->arch.hvm_domain.gfn_lock);
 }
 
@@ -937,34 +973,45 @@ int p2m_mem_paging_nominate(struct domai
     p2m_access_t a;
     mfn_t mfn;
     int ret;
+    struct trc_mem_p2m_paging T = { .td = d->domain_id };
+
+    T.d = current->domain->domain_id;
+    T.v = current->vcpu_id;
+    T.gfn = gfn;
 
     p2m_lock(p2m);
 
     mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
 
+    T.reason++;
     /* Check if mfn is valid */
     ret = -EINVAL;
     if ( !mfn_valid(mfn) )
         goto out;
 
+    T.reason++;
     /* Check p2m type */
     ret = -EAGAIN;
     if ( !p2m_is_pageable(p2mt) )
         goto out;
 
+    T.reason++;
     /* Check for io memory page */
     if ( is_iomem_page(mfn_x(mfn)) )
         goto out;
 
+    T.reason++;
     /* Check page count and type */
     page = mfn_to_page(mfn);
     if ( (page->count_info & (PGC_count_mask | PGC_allocated)) !=
          (1 | PGC_allocated) )
         goto out;
 
+    T.reason++;
     if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_none )
         goto out;
 
+    T.reason++;
     /* Fix p2m entry */
     set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_out, a);
     audit_p2m(p2m, 1);
@@ -972,6 +1019,10 @@ int p2m_mem_paging_nominate(struct domai
 
  out:
     p2m_unlock(p2m);
+
+    T.mfn = mfn_x(mfn);
+    T.p2mt = p2mt;
+    trace_var(TRC_MEM_P2M_PAGING_NOMINATE, 1, sizeof(T), &T);
     return ret;
 }
 
@@ -1002,6 +1053,11 @@ int p2m_mem_paging_evict(struct domain *
     mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     int ret = -EINVAL;
+    struct trc_mem_p2m_paging T = { .td = d->domain_id };
+
+    T.d = current->domain->domain_id;
+    T.v = current->vcpu_id;
+    T.gfn = gfn;
 
     p2m_lock(p2m);
 
@@ -1010,24 +1066,29 @@ int p2m_mem_paging_evict(struct domain *
     if ( unlikely(!mfn_valid(mfn)) )
         goto out;
 
+    T.reason++;
     /* Allow only nominated pages */
     if ( p2mt != p2m_ram_paging_out )
         goto out;
 
+    T.reason++;
     ret = -EBUSY;
     /* Get the page so it doesn't get modified under Xen's feet */
     page = mfn_to_page(mfn);
     if ( unlikely(!get_page(page, d)) )
         goto out;
 
+    T.reason++;
     /* Check page count and type once more */
     if ( (page->count_info & (PGC_count_mask | PGC_allocated)) !=
          (2 | PGC_allocated) )
         goto out_put;
 
+    T.reason++;
     if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_none )
         goto out_put;
 
+    T.reason++;
     /* Decrement guest domain's ref count of the page */
     if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
         put_page(page);
@@ -1050,6 +1111,12 @@ int p2m_mem_paging_evict(struct domain *
 
  out:
     p2m_unlock(p2m);
+
+    T.flag_evict_fail = !!ret;
+    T.mfn = mfn_x(mfn);
+    T.p2mt = p2mt;
+    trace_var(TRC_MEM_P2M_PAGING_EVICT, 1, sizeof(T), &T);
+
     return ret;
 }
 
@@ -1065,10 +1132,16 @@ int p2m_mem_paging_evict(struct domain *
 void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn)
 {
     mem_event_request_t req = { .type = MEM_EVENT_TYPE_PAGING, .gfn = gfn };
+    struct trc_mem_p2m_paging T = { .td = d->domain_id };
 
     /* Send release notification to pager */
     req.flags = MEM_EVENT_FLAG_DROP_PAGE;
 
+    T.d = current->domain->domain_id;
+    T.v = current->vcpu_id;
+    T.gfn = gfn;
+    trace_var(TRC_MEM_P2M_PAGING_DROP, 1, sizeof(T), &T);
+
     mem_event_put_request(d, &d->mem_event->paging, &req);
 }
 
@@ -1101,11 +1174,13 @@ void p2m_mem_paging_populate(struct doma
     p2m_access_t a;
     mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    int put_request = 0;
+    int put_request = 0, ring_full;
+    struct trc_mem_p2m_paging T = { .td = d->domain_id };
 
     /* Check that there's space on the ring for this request */
-    if ( mem_event_check_ring(d, &d->mem_event->paging) )
-        return;
+    ring_full = mem_event_check_ring(d, &d->mem_event->paging);
+    if ( ring_full )
+        goto trace;
 
     /* Fix p2m mapping */
     p2m_lock(p2m);
@@ -1127,11 +1202,24 @@ void p2m_mem_paging_populate(struct doma
     }
     p2m_unlock(p2m);
 
-    /* One request per gfn, guest vcpus go to sleep, foreigners try again */
-    if ( put_request )
-        mem_event_put_request(d, &d->mem_event->paging, &req);
-    else
-        mem_event_put_req_producers(d, &d->mem_event->paging);
+    T.mfn = mfn_x(mfn);
+    T.p2mt = p2mt;
+
+trace:
+    T.d = current->domain->domain_id;
+    T.v = current->vcpu_id;
+    T.gfn = gfn;
+    T.reason = ring_full;
+    T.flag_drop_page = put_request;
+    trace_var(TRC_MEM_P2M_PAGING_POPULATE, 1, sizeof(T), &T);
+
+    if ( !ring_full ) {
+        /* One request per gfn, guest vcpus go to sleep, foreigners try again */
+        if ( put_request )
+            mem_event_put_request(d, &d->mem_event->paging, &req);
+        else
+            mem_event_put_req_producers(d, &d->mem_event->paging);
+    }
 }
 
 /**
@@ -1153,6 +1241,7 @@ int p2m_mem_paging_prep(struct domain *d
     mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     int ret;
+    struct trc_mem_p2m_paging T = { .td = d->domain_id };
 
     p2m_lock(p2m);
 
@@ -1184,6 +1273,15 @@ int p2m_mem_paging_prep(struct domain *d
 
  out:
     p2m_unlock(p2m);
+
+    T.d = current->domain->domain_id;
+    T.v = current->vcpu_id;
+    T.gfn = gfn;
+    T.mfn = mfn_x(mfn);
+    T.p2mt = p2mt;
+    T.reason = ret;
+    trace_var(TRC_MEM_P2M_PAGING_PREP, 1, sizeof(T), &T);
+
     return ret;
 }
 
@@ -1209,6 +1307,7 @@ void p2m_mem_paging_resume(struct domain
     p2m_type_t p2mt;
     p2m_access_t a;
     mfn_t mfn;
+    struct trc_mem_p2m_paging T = { .td = d->domain_id };
 
     /* Pull the response off the ring */
     mem_event_get_response(&d->mem_event->paging, &rsp);
@@ -1230,8 +1329,16 @@ void p2m_mem_paging_resume(struct domain
             audit_p2m(p2m, 1);
         }
         p2m_unlock(p2m);
+        T.mfn = mfn_x(mfn);
+        T.p2mt = p2mt;
     }
 
+    T.d = current->domain->domain_id;
+    T.v = current->vcpu_id;
+    T.gfn = rsp.gfn;
+    T.flag_drop_page = !!(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE);
+    trace_var(TRC_MEM_P2M_PAGING_RESUME, 1, sizeof(T), &T);
+
     /* Wake vcpus waiting for room in the ring */
     mem_event_wake_requesters(&d->mem_event->paging);
 
diff -r c09ac3717a02 -r 0bf827a48a3c xen/include/public/trace.h
--- a/xen/include/public/trace.h
+++ b/xen/include/public/trace.h
@@ -94,6 +94,37 @@
 #define TRC_MEM_POD_ZERO_RECLAIM    (TRC_MEM + 17)
 #define TRC_MEM_POD_SUPERPAGE_SPLINTER (TRC_MEM + 18)
 
+#define TRC_MEM_EVENT_PUT_REQUEST      (TRC_MEM + 19)
+struct trc_mem_event_put_request {
+    unsigned int d:16, v:16;
+    unsigned int td:16, room:16;
+    unsigned int gfn;
+    unsigned int foreign_producers:16, ret:1, ring_bit:5;
+} __attribute__((packed));
+
+#define TRC_MEM_P2M_PAGING_NOMINATE    (TRC_MEM + 20)
+#define TRC_MEM_P2M_PAGING_EVICT       (TRC_MEM + 21)
+#define TRC_MEM_P2M_PAGING_DROP        (TRC_MEM + 22)
+#define TRC_MEM_P2M_PAGING_POPULATE    (TRC_MEM + 23)
+#define TRC_MEM_P2M_PAGING_PREP        (TRC_MEM + 24)
+#define TRC_MEM_P2M_PAGING_RESUME      (TRC_MEM + 25)
+struct trc_mem_p2m_paging {
+    unsigned int d:16, v:16;
+    unsigned int td:16, p2mt:5, reason:5, flag_evict_fail:1, flag_drop_page:1;
+    unsigned int gfn;
+    unsigned int mfn;
+} __attribute__((packed));
+
+#define TRC_MEM_P2M_PAGING_GET_QUEUE   (TRC_MEM + 26)
+#define TRC_MEM_P2M_PAGING_PUT_QUEUE   (TRC_MEM + 27)
+#define TRC_MEM_P2M_PAGING_WAKE_QUEUE  (TRC_MEM + 28)
+struct trc_mem_p2m_paging_queue {
+    unsigned int d:16, v:16;
+    unsigned int td:16, woken:8;
+    unsigned int gfn;
+    unsigned int index:16, waiters:16;
+} __attribute__((packed));
+
 
 #define TRC_PV_HYPERCALL             (TRC_PV +  1)
 #define TRC_PV_TRAP                  (TRC_PV +  3)

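For completeness, one plausible way to capture these new records is to enable
the TRC_MEM class while reproducing a paging workload and feed the result to
xenalyze. A rough sketch, assuming TRC_MEM is still 0x0010f000 in
public/trace.h and with arbitrary file names:

    xentrace -e 0x0010f000 /tmp/paging.trace
    xenalyze /tmp/paging.trace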