
Re: [Xen-devel] [RFC 4/9] arm64: utilize time accounting



(+ George and Dario)

Hi,

On 11/09/2019 11:32, Andrii Anisov wrote:
From: Andrii Anisov <andrii_anisov@xxxxxxxx>

Call time accounting hooks from appropriate transition points
of the ARM64 code.

Signed-off-by: Andrii Anisov <andrii_anisov@xxxxxxxx>
---
  xen/arch/arm/arm64/entry.S | 39 ++++++++++++++++++++++++++++++++++++---
  xen/arch/arm/domain.c      |  2 ++
  2 files changed, 38 insertions(+), 3 deletions(-)

diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index 2d9a271..6fb2fa9 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -143,12 +143,21 @@
 .endm
 
-.macro exit, hyp, compat
+        .macro  exit, hyp, compat, tacc=1
 .if \hyp == 0 /* Guest mode */
+        .if \tacc == 1

This is here because you may already be in the HYP state, right?

I noticed in the previous patch you mention that you only handle "re-entry" for the IRQ state.

As you don't have an "exit" for states other than IRQ, I would not consider this as re-entry. It is more like you transition from one state to another (it just happens to be the same state).

The problem of re-entry would arise if you take an exception that is going to re-enter the same state. But the concern would be exactly the same if you take an exception that switches the state (such as a synchronous hypervisor exception).

This raises the question: how do you account for SError interrupts/synchronous exceptions?
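
To make the re-entry concern concrete: one way to handle nesting uniformly, for IRQs, synchronous exceptions and SErrors alike, is to keep a small per-CPU stack of accounting states rather than one flag per state. The sketch below is only an illustration of that idea, not code from this series; account_time() is a hypothetical helper and the state names are made up:

    #include <xen/percpu.h>
    #include <xen/time.h>

    enum tacc_state { TACC_GUEST, TACC_HYP, TACC_IRQ, TACC_GSYNC, TACC_IDLE };

    /* Hypothetical helper: add a time slice to the per-state counters. */
    static void account_time(enum tacc_state st, s_time_t delta);

    /* Per-CPU stack of accounting states, so that any exception taken
     * while in hypervisor context simply pushes a new state and pops it
     * on exit, whatever the nesting combination is. */
    struct tacc_pcpu {
        enum tacc_state stack[8];   /* exception nesting is shallow */
        unsigned int depth;
        s_time_t stamp;             /* time of the last state change */
    };

    static DEFINE_PER_CPU(struct tacc_pcpu, tacc);

    static void tacc_push(enum tacc_state st)
    {
        struct tacc_pcpu *t = &this_cpu(tacc);
        s_time_t now = NOW();

        /* Charge the elapsed slice to the state we are interrupting. */
        account_time(t->stack[t->depth], now - t->stamp);
        t->stamp = now;
        t->stack[++t->depth] = st;
    }

    static void tacc_pop(void)
    {
        struct tacc_pcpu *t = &this_cpu(tacc);
        s_time_t now = NOW();

        /* Charge the slice to the state we are leaving, then resume
         * accounting for whatever state was interrupted. */
        account_time(t->stack[t->depth], now - t->stamp);
        t->depth--;
        t->stamp = now;
    }

With such a scheme the SError/synchronous-exception question answers itself: every entry point pushes, every exit pops, and no path needs a special "re-entry" case.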

+
+        mov     x0, #1
+        bl      tacc_hyp
+
+        .endif
+
          bl      leave_hypervisor_tail /* Disables interrupts on return */

As mentioned on the previous patch, leave_hypervisor_tail() may do some I/O emulation that needs to be preemptible. So I don't think it is correct to account that time to the hypervisor.

+        mov     x0, #1
+        bl      tacc_guest
          exit_guest \compat
 .endif
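
To illustrate in C pseudocode why the placement matters (a sketch only; the real code is the assembly above, exit_to_guest() is a hypothetical stand-in for the exit macro, and tacc_guest() is the hook from this series): if leave_hypervisor_tail() can be preempted while emulating I/O on behalf of the guest, that window arguably belongs to guest time, so the accounting switch would have to move before the call rather than after it:

    /* C rendering of the guest-exit path above (sketch only). */
    static void exit_to_guest(void)
    {
        /* Switch to guest accounting *before* the potentially
         * preemptible work done on the guest's behalf... */
        tacc_guest(1);
        leave_hypervisor_tail();    /* may do preemptible I/O emulation */
        /* ... then restore guest registers and eret. */
    }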
@@ -205,9 +214,15 @@ hyp_sync:
hyp_irq:
          entry   hyp=1
+        mov     x0,#5
+        bl      tacc_irq_enter
          msr     daifclr, #4
          mov     x0, sp
          bl      do_trap_irq
+
+        mov     x0,#5
+        bl      tacc_irq_exit
+
          exit    hyp=1
guest_sync:
@@ -291,6 +306,9 @@ guest_sync_slowpath:
           * to save them.
           */
          entry   hyp=0, compat=0, save_x0_x1=0
+
+        mov     x0,#1
+        bl      tacc_gsync
          /*
           * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
           * is not set. If a vSError took place, the initial exception will be
@@ -307,6 +325,10 @@ guest_sync_slowpath:
guest_irq:
          entry   hyp=0, compat=0
+
+        mov     x0,#6
+        bl      tacc_irq_enter
+
          /*
           * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
           * is not set. If a vSError took place, the initial exception will be
@@ -319,6 +341,8 @@ guest_irq:
          mov     x0, sp
          bl      do_trap_irq
  1:
+        mov     x0,#6
+        bl      tacc_irq_exit
          exit    hyp=0, compat=0
guest_fiq_invalid:
@@ -334,6 +358,9 @@ guest_error:
guest_sync_compat:
          entry   hyp=0, compat=1
+
+        mov     x0,#2
+        bl      tacc_gsync
          /*
           * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
           * is not set. If a vSError took place, the initial exception will be
@@ -350,6 +377,10 @@ guest_sync_compat:
guest_irq_compat:
          entry   hyp=0, compat=1
+
+        mov     x0,#7
+        bl      tacc_irq_enter
+
          /*
           * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
           * is not set. If a vSError took place, the initial exception will be
@@ -362,6 +393,8 @@ guest_irq_compat:
          mov     x0, sp
          bl      do_trap_irq
  1:
+        mov     x0,#7
+        bl      tacc_irq_exit
          exit    hyp=0, compat=1
guest_fiq_invalid_compat:
@@ -376,9 +409,9 @@ guest_error_compat:
          exit    hyp=0, compat=1
ENTRY(return_to_new_vcpu32)
-        exit    hyp=0, compat=1
+        exit    hyp=0, compat=1, tacc=0
  ENTRY(return_to_new_vcpu64)
-        exit    hyp=0, compat=0
+        exit    hyp=0, compat=0, tacc=0
return_from_trap:
          msr     daifset, #2 /* Mask interrupts */
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index a9c4113..53ef630 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -51,11 +51,13 @@ static void do_idle(void)
      process_pending_softirqs();

      local_irq_disable();
+    tacc_idle(1);

Any reason to call this before the if and not inside it? (A sketch of that alternative follows the hunk below.)

      if ( cpu_is_haltable(cpu) )
      {
          dsb(sy);
          wfi();
      }
+    tacc_hyp(2);
      local_irq_enable();

      sched_tick_resume();
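
For reference, the alternative placement suggested above would look something like this (a sketch only; tacc_idle()/tacc_hyp() are the hooks introduced earlier in the series, and the rest of do_idle() is unchanged):

    static void do_idle(void)
    {
        unsigned int cpu = smp_processor_id();

        sched_tick_suspend();
        /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
        process_pending_softirqs();

        local_irq_disable();
        if ( cpu_is_haltable(cpu) )
        {
            tacc_idle(1);   /* account idle time only if we actually halt */
            dsb(sy);
            wfi();
            tacc_hyp(2);    /* woken up: back to hypervisor time */
        }
        local_irq_enable();

        sched_tick_resume();
    }

That way a pass through do_idle() that does not halt (because the CPU is not haltable) never gets accounted as idle time at all.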


Cheers,

--
Julien Grall
