[Xen-devel] [PATCH RFC v12 13/21] pvh: Support read_segment_register for PVH
This will be necessary to do PV-style emulated operations for PVH guests.
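To illustrate the shape of the change, here is a minimal sketch of an old and a new call site, using the names that appear in the hunks below (the surrounding context is illustrative only):

    /* Before: the macro always reads the hardware selector register. */
    sel = read_segment_register(ds);

    /* After: the caller passes the vcpu and register frame, so that for a
     * PVH vcpu in guest mode the selector is fetched from the VMCS via
     * hvm_funcs.read_selector() rather than from the real register. */
    sel = read_segment_register(v, regs, ds);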
Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
CC: Jan Beulich <jan.beulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
---
xen/arch/x86/domain.c | 8 ++++----
xen/arch/x86/hvm/vmx/vmx.c | 40 ++++++++++++++++++++++++++++++++++++++++
xen/arch/x86/traps.c | 26 ++++++++++++--------------
xen/arch/x86/x86_64/traps.c | 16 ++++++++--------
xen/include/asm-x86/hvm/hvm.h | 6 ++++++
xen/include/asm-x86/system.h | 19 +++++++++++++++----
6 files changed, 85 insertions(+), 30 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index c75798b..02a6479 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1217,10 +1217,10 @@ static void save_segments(struct vcpu *v)
struct cpu_user_regs *regs = &v->arch.user_regs;
unsigned int dirty_segment_mask = 0;
- regs->ds = read_segment_register(ds);
- regs->es = read_segment_register(es);
- regs->fs = read_segment_register(fs);
- regs->gs = read_segment_register(gs);
+ regs->ds = read_segment_register(v, regs, ds);
+ regs->es = read_segment_register(v, regs, es);
+ regs->fs = read_segment_register(v, regs, fs);
+ regs->gs = read_segment_register(v, regs, gs);
if ( regs->ds )
dirty_segment_mask |= DIRTY_DS;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 0ac96ab..c2156af 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -664,6 +664,45 @@ static void vmx_ctxt_switch_to(struct vcpu *v)
.fields = { .type = 0xb, .s = 0, .dpl = 0, .p = 1, .avl = 0, \
.l = 0, .db = 0, .g = 0, .pad = 0 } }).bytes)
+u16 vmx_read_selector(struct vcpu *v, enum x86_segment seg)
+{
+ u16 sel = 0;
+
+ vmx_vmcs_enter(v);
+ switch ( seg )
+ {
+ case x86_seg_cs:
+ sel = __vmread(GUEST_CS_SELECTOR);
+ break;
+
+ case x86_seg_ss:
+ sel = __vmread(GUEST_SS_SELECTOR);
+ break;
+
+ case x86_seg_es:
+ sel = __vmread(GUEST_ES_SELECTOR);
+ break;
+
+ case x86_seg_ds:
+ sel = __vmread(GUEST_DS_SELECTOR);
+ break;
+
+ case x86_seg_fs:
+ sel = __vmread(GUEST_FS_SELECTOR);
+ break;
+
+ case x86_seg_gs:
+ sel = __vmread(GUEST_GS_SELECTOR);
+ break;
+
+ default:
+ BUG();
+ }
+ vmx_vmcs_exit(v);
+
+ return sel;
+}
+
void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
struct segment_register *reg)
{
@@ -1566,6 +1605,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
.sync_pir_to_irr = vmx_sync_pir_to_irr,
.handle_eoi = vmx_handle_eoi,
.nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
+ .read_selector = vmx_read_selector,
};
const struct hvm_function_table * __init start_vmx(void)
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index e4f080c..a5d8349 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1833,8 +1833,6 @@ static inline uint64_t guest_misc_enable(uint64_t val)
} \
(eip) += sizeof(_x); _x; })
-#define read_sreg(regs, sr) read_segment_register(sr)
-
static int is_cpufreq_controller(struct domain *d)
{
return ((cpufreq_controller == FREQCTL_dom0_kernel) &&
@@ -1879,7 +1877,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
goto fail;
/* emulating only opcodes not allowing SS to be default */
- data_sel = read_sreg(regs, ds);
+ data_sel = read_segment_register(v, regs, ds);
/* Legacy prefixes. */
for ( i = 0; i < 8; i++, rex == opcode || (rex = 0) )
@@ -1897,17 +1895,17 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
data_sel = regs->cs;
continue;
case 0x3e: /* DS override */
- data_sel = read_sreg(regs, ds);
+ data_sel = read_segment_register(v, regs, ds);
continue;
case 0x26: /* ES override */
- data_sel = read_sreg(regs, es);
+ data_sel = read_segment_register(v, regs, es);
continue;
case 0x64: /* FS override */
- data_sel = read_sreg(regs, fs);
+ data_sel = read_segment_register(v, regs, fs);
lm_ovr = lm_seg_fs;
continue;
case 0x65: /* GS override */
- data_sel = read_sreg(regs, gs);
+ data_sel = read_segment_register(v, regs, gs);
lm_ovr = lm_seg_gs;
continue;
case 0x36: /* SS override */
@@ -1954,7 +1952,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
if ( !(opcode & 2) )
{
- data_sel = read_sreg(regs, es);
+ data_sel = read_segment_register(v, regs, es);
lm_ovr = lm_seg_none;
}
@@ -2687,22 +2685,22 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
ASSERT(opnd_sel);
continue;
case 0x3e: /* DS override */
- opnd_sel = read_sreg(regs, ds);
+ opnd_sel = read_segment_register(v, regs, ds);
if ( !opnd_sel )
opnd_sel = dpl;
continue;
case 0x26: /* ES override */
- opnd_sel = read_sreg(regs, es);
+ opnd_sel = read_segment_register(v, regs, es);
if ( !opnd_sel )
opnd_sel = dpl;
continue;
case 0x64: /* FS override */
- opnd_sel = read_sreg(regs, fs);
+ opnd_sel = read_segment_register(v, regs, fs);
if ( !opnd_sel )
opnd_sel = dpl;
continue;
case 0x65: /* GS override */
- opnd_sel = read_sreg(regs, gs);
+ opnd_sel = read_segment_register(v, regs, gs);
if ( !opnd_sel )
opnd_sel = dpl;
continue;
@@ -2755,7 +2753,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
switch ( modrm & 7 )
{
default:
- opnd_sel = read_sreg(regs, ds);
+ opnd_sel = read_segment_register(v, regs, ds);
break;
case 4: case 5:
opnd_sel = regs->ss;
@@ -2783,7 +2781,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
break;
}
if ( !opnd_sel )
- opnd_sel = read_sreg(regs, ds);
+ opnd_sel = read_segment_register(v, regs, ds);
switch ( modrm & 7 )
{
case 0: case 2: case 4:
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 8644aaf..3dfb309 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -123,10 +123,10 @@ void show_registers(struct cpu_user_regs *regs)
fault_crs[0] = read_cr0();
fault_crs[3] = read_cr3();
fault_crs[4] = read_cr4();
- fault_regs.ds = read_segment_register(ds);
- fault_regs.es = read_segment_register(es);
- fault_regs.fs = read_segment_register(fs);
- fault_regs.gs = read_segment_register(gs);
+ fault_regs.ds = read_segment_register(v, regs, ds);
+ fault_regs.es = read_segment_register(v, regs, es);
+ fault_regs.fs = read_segment_register(v, regs, fs);
+ fault_regs.gs = read_segment_register(v, regs, gs);
}
print_xen_info();
@@ -239,10 +239,10 @@ void do_double_fault(struct cpu_user_regs *regs)
crs[2] = read_cr2();
crs[3] = read_cr3();
crs[4] = read_cr4();
- regs->ds = read_segment_register(ds);
- regs->es = read_segment_register(es);
- regs->fs = read_segment_register(fs);
- regs->gs = read_segment_register(gs);
+ regs->ds = read_segment_register(current, regs, ds);
+ regs->es = read_segment_register(current, regs, es);
+ regs->fs = read_segment_register(current, regs, fs);
+ regs->gs = read_segment_register(current, regs, gs);
printk("CPU: %d\n", cpu);
_show_registers(regs, crs, CTXT_hypervisor, NULL);
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 00489cf..401fa4c 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -193,6 +193,7 @@ struct hvm_function_table {
paddr_t *L1_gpa, unsigned int *page_order,
uint8_t *p2m_acc, bool_t access_r,
bool_t access_w, bool_t access_x);
+ u16 (*read_selector)(struct vcpu *v, enum x86_segment seg);
};
extern struct hvm_function_table hvm_funcs;
@@ -344,6 +345,11 @@ static inline int hvm_event_pending(struct vcpu *v)
return hvm_funcs.event_pending(v);
}
+static inline u16 pvh_get_selector(struct vcpu *v, enum x86_segment seg)
+{
+ return hvm_funcs.read_selector(v, seg);
+}
+
/* These reserved bits in lower 32 remain 0 after any load of CR0 */
#define HVM_CR0_GUEST_RESERVED_BITS \
(~((unsigned long) \
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index 6ab7d56..1242657 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -4,10 +4,21 @@
#include <xen/lib.h>
#include <xen/bitops.h>
-#define read_segment_register(name) \
-({ u16 __sel; \
- asm volatile ( "movw %%" STR(name) ",%0" : "=r" (__sel) ); \
- __sel; \
+/*
+ * We need vcpu because during context switch, going from PV to PVH,
+ * in save_segments() current has been updated to next, and no longer pointing
+ * to the PV, but the intention is to get selector for the PV. Checking
+ * is_pvh_vcpu(current) will yield incorrect results in such a case.
+ */
+#define read_segment_register(vcpu, regs, name) \
+({ u16 __sel; \
+ struct cpu_user_regs *_regs = (regs); \
+ \
+ if ( is_pvh_vcpu(vcpu) && guest_mode(_regs) ) \
+ __sel = pvh_get_selector(vcpu, x86_seg_##name); \
+ else \
+ asm volatile ( "movw %%" #name ",%0" : "=r" (__sel) ); \
+ __sel; \
})
#define wbinvd() \
--
1.7.9.5