# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1199690182 -32400
# Node ID 379184cb560f8da23d1b29012af3e76f2c425a45
# Parent 70146eb02f8aa77424e5a5e9471fd223ba21b6af
clean up of context switch code: fold vmx_load_all_rr() and the vTM timer
migration into vmx_load_state(), factor out load_state() and the
{set,clear}_current_psr_i_addr() helpers, and open-code the VHPT walker
disable next to ia64_switch_to().
PATCHNAME: clean_up_of_context_switch
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
diff -r 70146eb02f8a -r 379184cb560f xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c Fri Dec 28 13:11:34 2007 +0900
+++ b/xen/arch/ia64/vmx/vmx_init.c Mon Jan 07 16:16:22 2008 +0900
@@ -335,6 +335,10 @@ vmx_load_state(struct vcpu *v)
u64 status;
BUG_ON(v != current);
+
+ vmx_load_all_rr(v);
+
+ /* vmx_load_all_rr() pins down v->arch.privregs with both dtr/itr */
status = ia64_pal_vp_restore((u64 *)v->arch.privregs, 0);
if (status != PAL_STATUS_SUCCESS){
panic_domain(vcpu_regs(v),"Restore vp status failed\n");
@@ -350,6 +354,8 @@ vmx_load_state(struct vcpu *v)
ia64_set_kr(7, v->arch.arch_vmx.vkr[7]);
/* Guest vTLB is not required to be switched explicitly, since
* it is anchored in the vcpu */
+
+ migrate_timer(&v->arch.arch_vmx.vtm.vtm_timer, v->processor);
}
static int
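[Editorial note] Pieced together from the two hunks above, vmx_load_state()
now reads roughly as follows. This is a reader's reconstruction, not part of
the patch: the void return type and the kr0..kr6 restores are assumed from
the surrounding code, which the diff does not show.

    void vmx_load_state(struct vcpu *v)
    {
        u64 status;

        BUG_ON(v != current);

        vmx_load_all_rr(v);

        /* vmx_load_all_rr() pins down v->arch.privregs with both dtr/itr */
        status = ia64_pal_vp_restore((u64 *)v->arch.privregs, 0);
        if (status != PAL_STATUS_SUCCESS)
            panic_domain(vcpu_regs(v), "Restore vp status failed\n");

        /* ... ia64_set_kr(0..7, v->arch.arch_vmx.vkr[0..7]) ... */

        /* Guest vTLB is not required to be switched explicitly,
         * since it is anchored in the vcpu. */
        migrate_timer(&v->arch.arch_vmx.vtm.vtm_timer, v->processor);
    }

The ordering matters: vmx_load_all_rr() must run before
ia64_pal_vp_restore(), because PAL reaches v->arch.privregs through the
dtr/itr translations that vmx_load_all_rr() pins. Previously the callers
had to get this right themselves, which is what the vmx_do_resume() hunk
below removes.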
@@ -602,9 +608,7 @@ void vmx_do_resume(struct vcpu *v)
{
ioreq_t *p;
- vmx_load_all_rr(v);
vmx_load_state(v);
- migrate_timer(&v->arch.arch_vmx.vtm.vtm_timer, v->processor);
/* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
/* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
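[Editorial note] With those pieces folded in, vmx_do_resume() reduces to a
thin wrapper. Again a reconstruction; the ioreq handling after the hunk is
untouched:

    void vmx_do_resume(struct vcpu *v)
    {
        ioreq_t *p;

        vmx_load_state(v);  /* now also loads rrs and migrates the vTM timer */

        /* ... ioreq handling, stolen from hvm_do_resume() ... */
    }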
diff -r 70146eb02f8a -r 379184cb560f xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Fri Dec 28 13:11:34 2007 +0900
+++ b/xen/arch/ia64/xen/domain.c Mon Jan 07 16:16:22 2008 +0900
@@ -78,16 +78,6 @@ DEFINE_PER_CPU(struct vcpu *, fp_owner);
#include <xen/sched-if.h>
-static void
-ia64_disable_vhpt_walker(void)
-{
- // disable VHPT. ia64_new_rr7() might cause VHPT
- // fault without this because it flushes dtr[IA64_TR_VHPT]
- // (VHPT_SIZE_LOG2 << 2) is just for avoid
- // Reserved Register/Field fault.
- ia64_set_pta(VHPT_SIZE_LOG2 << 2);
-}
-
static void flush_vtlb_for_context_switch(struct vcpu* prev, struct vcpu* next)
{
int cpu = smp_processor_id();
@@ -165,6 +155,21 @@ static void flush_cache_for_context_swit
}
}
+static void set_current_psr_i_addr(struct vcpu* v)
+{
+ __ia64_per_cpu_var(current_psr_i_addr) =
+ (uint8_t*)(v->domain->arch.shared_info_va +
+ INT_ENABLE_OFFSET(v));
+ __ia64_per_cpu_var(current_psr_ic_addr) = (int *)
+ (v->domain->arch.shared_info_va + XSI_PSR_IC_OFS);
+}
+
+static void clear_current_psr_i_addr(void)
+{
+ __ia64_per_cpu_var(current_psr_i_addr) = NULL;
+ __ia64_per_cpu_var(current_psr_ic_addr) = NULL;
+}
+
static void lazy_fp_switch(struct vcpu *prev, struct vcpu *next)
{
/*
@@ -196,26 +201,28 @@ static void lazy_fp_switch(struct vcpu *
}
}
+static void load_state(struct vcpu *v)
+{
+ load_region_regs(v);
+ ia64_set_pta(vcpu_pta(v));
+ vcpu_load_kernel_regs(v);
+ if (vcpu_pkr_in_use(v))
+ vcpu_pkr_load_regs(v);
+ set_current_psr_i_addr(v);
+}
+
void schedule_tail(struct vcpu *prev)
{
extern char ia64_ivt;
context_saved(prev);
- ia64_disable_vhpt_walker();
if (VMX_DOMAIN(current))
vmx_do_resume(current);
else {
if (VMX_DOMAIN(prev))
ia64_set_iva(&ia64_ivt);
- load_region_regs(current);
- ia64_set_pta(vcpu_pta(current));
- vcpu_load_kernel_regs(current);
- __ia64_per_cpu_var(current_psr_i_addr) =
- (uint8_t*)(current->domain->arch.shared_info_va +
- INT_ENABLE_OFFSET(current));
- __ia64_per_cpu_var(current_psr_ic_addr) = (int *)
- (current->domain->arch.shared_info_va + XSI_PSR_IC_OFS);
+ load_state(current);
migrate_timer(&current->arch.hlt_timer, current->processor);
}
flush_vtlb_for_context_switch(prev, current);
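[Editorial note] With load_state() factored out, the non-VMX arm of
schedule_tail() collapses to a few lines. A reconstruction of the whole
function after this hunk (the closing brace lies outside the hunk and is
assumed):

    void schedule_tail(struct vcpu *prev)
    {
        extern char ia64_ivt;

        context_saved(prev);
        if (VMX_DOMAIN(current))
            vmx_do_resume(current);
        else {
            if (VMX_DOMAIN(prev))
                ia64_set_iva(&ia64_ivt);
            load_state(current);
            migrate_timer(&current->arch.hlt_timer, current->processor);
        }
        flush_vtlb_for_context_switch(prev, current);
    }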
@@ -242,7 +249,6 @@ void context_switch(struct vcpu *prev, s
}
}
- ia64_disable_vhpt_walker();
lazy_fp_switch(prev, current);
if (prev->arch.dbg_used || next->arch.dbg_used) {
@@ -253,37 +259,31 @@ void context_switch(struct vcpu *prev, s
ia64_load_debug_regs(next->arch.dbr);
}
+ /*
+ * Disable the VHPT walker.
+ * ia64_switch_to() might cause a VHPT fault because it flushes
+ * dtr[IA64_TR_VHPT] and reinserts the mapping with dtr[IA64_TR_STACK].
+ * (VHPT_SIZE_LOG2 << 2) merely avoids a
+ * Reserved Register/Field fault.
+ */
+ ia64_set_pta(VHPT_SIZE_LOG2 << 2);
prev = ia64_switch_to(next);
/* Note: ia64_switch_to does not return here at vcpu initialization. */
if (VMX_DOMAIN(current)) {
- vmx_load_all_rr(current);
vmx_load_state(current);
- migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
- current->processor);
} else {
- struct domain *nd;
extern char ia64_ivt;
if (VMX_DOMAIN(prev))
ia64_set_iva(&ia64_ivt);
- nd = current->domain;
- if (!is_idle_domain(nd)) {
- load_region_regs(current);
- ia64_set_pta(vcpu_pta(current));
- vcpu_load_kernel_regs(current);
- if (vcpu_pkr_in_use(current))
- vcpu_pkr_load_regs(current);
+ if (!is_idle_vcpu(current)) {
+ load_state(current);
vcpu_set_next_timer(current);
if (vcpu_timer_expired(current))
vcpu_pend_timer(current);
- __ia64_per_cpu_var(current_psr_i_addr) =
- (uint8_t*)(nd->arch.shared_info_va +
- INT_ENABLE_OFFSET(current));
- __ia64_per_cpu_var(current_psr_ic_addr) =
- (int *)(nd->arch.shared_info_va + XSI_PSR_IC_OFS);
/* steal time accounting */
if (!guest_handle_is_null(runstate_guest(current)))
__copy_to_guest(runstate_guest(current), &current->runstate,
1);
@@ -292,8 +292,7 @@ void context_switch(struct vcpu *prev, s
* walker. Then all accesses that happen within the idle context
* will be handled by TR mapping and identity mapping.
*/
- __ia64_per_cpu_var(current_psr_i_addr) = NULL;
- __ia64_per_cpu_var(current_psr_ic_addr) = NULL;
+ clear_current_psr_i_addr();
}
}
local_irq_restore(spsr);
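[Editorial note] For context on the VHPT comment above: cr.pta packs the
walker enable bit and the VHPT size into one register, which is why a bare
(VHPT_SIZE_LOG2 << 2) both disables the walker and stays architecturally
legal. A sketch of the layout, from my reading of the Itanium architecture
manual; the comment block below is illustrative and not part of the patch:

    /*
     * cr.pta (Page Table Address register) layout, roughly:
     *   bit   0     ve   - VHPT walker enable
     *   bits  7:2   size - log2 of the VHPT size; must be a legal
     *                      (non-reserved) value even when ve is 0
     *   bit   8     vf   - VHPT format (short/long)
     *   bits 63:15  base - VHPT base address
     *
     * (VHPT_SIZE_LOG2 << 2) writes only the size field: ve stays 0,
     * so the walker is off, while size remains legal; writing a size
     * of 0 would raise the Reserved Register/Field fault the comment
     * mentions.
     */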
@@ -1710,12 +1709,10 @@ domain_set_shared_info_va (unsigned long
VCPU(v, interrupt_mask_addr) = (unsigned char *)va +
INT_ENABLE_OFFSET(v);
-
- __ia64_per_cpu_var(current_psr_i_addr) =
- (uint8_t*)(va + INT_ENABLE_OFFSET(current));
- __ia64_per_cpu_var(current_psr_ic_addr) = (int *)(va + XSI_PSR_IC_OFS);
+ set_current_psr_i_addr(v);
/* Remap the shared pages. */
+ BUG_ON(VMX_DOMAIN(v));
rc = !set_one_rr(7UL << 61, PSCB(v,rrs[7]));
BUG_ON(rc);