[Xen-devel] [PATCH 4/8] x86/SVM: Add vcpu scheduling support for AVIC



From: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>

Add hooks to manage the AVIC physical APIC ID table entry of a vcpu during
scheduling: record the host physical APIC ID and set the IsRunning bit when
the vcpu is scheduled in or resumed, and clear the bit when it is scheduled
out or blocked.
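
For reference, the hooks below only touch two fields of the 64-bit physical
APIC ID table entry: the host physical APIC ID and the IsRunning bit (bit 62,
hence IS_RUNNING_BIT). A minimal sketch of the entry layout this assumes is
given here; field names other than host_phy_apic_id and is_running are
illustrative only:

    struct avic_physical_id_entry {
        u64 host_phy_apic_id : 8;   /* APIC ID of the pCPU the vcpu runs on */
        u64 res1             : 4;   /* reserved (illustrative) */
        u64 bk_pg_ptr_mfn    : 40;  /* vAPIC backing page pointer (illustrative) */
        u64 res2             : 10;  /* reserved (illustrative) */
        u64 is_running       : 1;   /* bit 62: vcpu is currently scheduled in */
        u64 valid            : 1;   /* bit 63: entry valid (illustrative) */
    };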

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@xxxxxxx>
---
 xen/arch/x86/hvm/svm/avic.c | 54 +++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/svm/svm.c  | 10 +++++++++
 2 files changed, 64 insertions(+)

diff --git a/xen/arch/x86/hvm/svm/avic.c b/xen/arch/x86/hvm/svm/avic.c
index e112469774..7a25d83954 100644
--- a/xen/arch/x86/hvm/svm/avic.c
+++ b/xen/arch/x86/hvm/svm/avic.c
@@ -40,6 +40,7 @@
 
 #define AVIC_UNACCEL_ACCESS_OFFSET_MASK    0xFF0
 
+#define IS_RUNNING_BIT        62
 /*
  * Note:
  * Currently, svm-avic mode is not supported with nested virtualization.
@@ -63,6 +64,54 @@ avic_get_physical_id_entry(struct svm_domain *d, unsigned int index)
     return &d->avic_physical_id_table[index];
 }
 
+static void avic_vcpu_load(struct vcpu *v)
+{
+    unsigned long tmp;
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+    int h_phy_apic_id;
+    struct avic_physical_id_entry *entry = (struct avic_physical_id_entry *)&tmp;
+
+    ASSERT(!test_bit(_VPF_blocked, &v->pause_flags));
+
+    /*
+     * Note: APIC ID = 0xff is used for broadcast.
+     *       APIC ID > 0xff is reserved.
+     */
+    h_phy_apic_id = cpu_data[v->processor].apicid;
+    ASSERT(h_phy_apic_id < AVIC_PHY_APIC_ID_MAX);
+
+    tmp = read_atomic((u64*)(s->avic_last_phy_id));
+    entry->host_phy_apic_id = h_phy_apic_id;
+    entry->is_running = 1;
+    write_atomic((u64*)(s->avic_last_phy_id), tmp);
+}
+
+static void avic_vcpu_unload(struct vcpu *v)
+{
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+
+    clear_bit(IS_RUNNING_BIT, (u64*)(s->avic_last_phy_id));
+}
+
+static void avic_vcpu_resume(struct vcpu *v)
+{
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+
+    ASSERT(svm_avic_vcpu_enabled(v));
+    ASSERT(!test_bit(_VPF_blocked, &v->pause_flags));
+
+    set_bit(IS_RUNNING_BIT, (u64*)(s->avic_last_phy_id));
+}
+
+static void avic_vcpu_block(struct vcpu *v)
+{
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+
+    ASSERT(svm_avic_vcpu_enabled(v));
+
+    clear_bit(IS_RUNNING_BIT, (u64*)(s->avic_last_phy_id));
+}
+
 int svm_avic_dom_init(struct domain *d)
 {
     int ret = 0;
@@ -106,6 +155,11 @@ int svm_avic_dom_init(struct domain *d)
 
     spin_lock_init(&d->arch.hvm_domain.svm.avic_dfr_mode_lock);
 
+    d->arch.hvm_domain.pi_ops.switch_from = avic_vcpu_unload;
+    d->arch.hvm_domain.pi_ops.switch_to = avic_vcpu_load;
+    d->arch.hvm_domain.pi_ops.vcpu_block = avic_vcpu_block;
+    d->arch.hvm_domain.pi_ops.do_resume = avic_vcpu_resume;
+
     return ret;
  err_out:
     svm_avic_dom_destroy(d);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index c47042bf6b..641ad0dbc1 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1070,6 +1070,10 @@ static void svm_ctxt_switch_from(struct vcpu *v)
     svm_tsc_ratio_save(v);
 
     svm_sync_vmcb(v);
+
+    if ( v->domain->arch.hvm_domain.pi_ops.switch_from )
+        v->domain->arch.hvm_domain.pi_ops.switch_from(v);
+
     svm_vmload_pa(per_cpu(host_vmcb, cpu));
 
     /* Resume use of ISTs now that the host TR is reinstated. */
@@ -1102,6 +1106,9 @@ static void svm_ctxt_switch_to(struct vcpu *v)
     svm_lwp_load(v);
     svm_tsc_ratio_load(v);
 
+    if ( v->domain->arch.hvm_domain.pi_ops.switch_to )
+        v->domain->arch.hvm_domain.pi_ops.switch_to(v);
+
     if ( cpu_has_rdtscp )
         wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
 }
@@ -1148,6 +1155,9 @@ static void noreturn svm_do_resume(struct vcpu *v)
         vmcb_set_vintr(vmcb, intr);
     }
 
+    if ( v->domain->arch.hvm_domain.pi_ops.do_resume )
+        v->domain->arch.hvm_domain.pi_ops.do_resume(v);
+
     hvm_do_resume(v);
 
     reset_stack_and_jump(svm_asm_do_resume);
-- 
2.11.0

