[Xen-devel] [PATCH v2 05/18] x86/pv: clean up emulate_ops.c
Please can you fold in the following delta?
---
xen/arch/x86/pv/emulate_ops.c | 101 +++++++++++++++++++++---------------------
1 file changed, 51 insertions(+), 50 deletions(-)
diff --git a/xen/arch/x86/pv/emulate_ops.c b/xen/arch/x86/pv/emulate_ops.c
index 97c8d14..9341dec 100644
--- a/xen/arch/x86/pv/emulate_ops.c
+++ b/xen/arch/x86/pv/emulate_ops.c
@@ -39,7 +39,7 @@
#include <xsm/xsm.h>
-/* I/O emulation support. Helper routines for, and type of, the stack stub.*/
+/* I/O emulation support. Helper routines for, and type of, the stack stub. */
void host_to_guest_gpr_switch(struct cpu_user_regs *);
unsigned long guest_to_host_gpr_switch(unsigned long);
@@ -318,15 +318,14 @@ static io_emul_stub_t *io_emul_stub_setup(struct priv_op_ctxt *ctxt, u8 opcode,
}
/* Has the guest requested sufficient permission for this I/O access? */
-static int guest_io_okay(unsigned int port, unsigned int bytes,
- struct vcpu *v, struct cpu_user_regs *regs)
+static bool guest_io_okay(unsigned int port, unsigned int bytes,
+ struct vcpu *v, struct cpu_user_regs *regs)
{
/* If in user mode, switch to kernel mode just to read I/O bitmap. */
- int user_mode = !(v->arch.flags & TF_kernel_mode);
-#define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(v)
+ const bool user_mode = !(v->arch.flags & TF_kernel_mode);
if ( iopl_ok(v, regs) )
- return 1;
+ return true;
if ( v->arch.pv_vcpu.iobmp_limit > (port + bytes) )
{
@@ -336,9 +335,11 @@ static int guest_io_okay(unsigned int port, unsigned int bytes,
* Grab permission bytes from guest space. Inaccessible bytes are
* read as 0xff (no access allowed).
*/
- TOGGLE_MODE();
+ if ( user_mode )
+ toggle_guest_mode(v);
+
switch ( __copy_from_guest_offset(x.bytes, v->arch.pv_vcpu.iobmp,
- port>>3, 2) )
+ port >> 3, 2) )
{
default: x.bytes[0] = ~0;
/* fallthrough */
@@ -346,14 +347,15 @@ static int guest_io_okay(unsigned int port, unsigned int bytes,
/* fallthrough */
case 0: break;
}
- TOGGLE_MODE();
- if ( (x.mask & (((1<<bytes)-1) << (port&7))) == 0 )
- return 1;
+ if ( user_mode )
+ toggle_guest_mode(v);
+
+ if ( (x.mask & (((1u << bytes) - 1) << (port & 7))) == 0 )
+ return true;
}
-#undef TOGGLE_MODE
- return 0;
+ return false;
}
static unsigned int check_guest_io_breakpoint(struct vcpu *v,
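(An aside for anyone checking the bitmap arithmetic above: a minimal
standalone sketch, with a local array standing in for the guest's iobmp
and made-up ports, of how the test works.  Each port owns one bit,
fetched starting at byte port >> 3; a set bit means "access denied",
and the access is allowed only when all 'bytes' consecutive bits are
clear.  The union trick assumes little-endian, as on x86 and as in the
original code.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool io_bits_clear(const uint8_t *bitmap, unsigned int port,
                          unsigned int bytes)
{
    union { uint8_t bytes[2]; uint16_t mask; } x;

    /* Two permission bytes always cover port .. port + 3. */
    x.bytes[0] = bitmap[port >> 3];
    x.bytes[1] = bitmap[(port >> 3) + 1];

    return (x.mask & (((1u << bytes) - 1) << (port & 7))) == 0;
}

int main(void)
{
    uint8_t bitmap[8192] = { 0 };

    bitmap[0x70 >> 3] |= 1u << (0x70 & 7);            /* deny port 0x70 */

    printf("port 0x70, 1 byte : %s\n",
           io_bits_clear(bitmap, 0x70, 1) ? "ok" : "denied");
    printf("port 0x71, 1 byte : %s\n",
           io_bits_clear(bitmap, 0x71, 1) ? "ok" : "denied");
    printf("port 0x6f, 2 bytes: %s\n",     /* straddles the denied bit */
           io_bits_clear(bitmap, 0x6f, 2) ? "ok" : "denied");

    return 0;
}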
@@ -386,7 +388,7 @@ static unsigned int check_guest_io_breakpoint(struct vcpu *v,
}
if ( (start < (port + len)) && ((start + width) > port) )
- match |= 1 << i;
+ match |= 1u << i;
}
return match;
@@ -401,11 +403,11 @@ static bool admin_io_okay(unsigned int port, unsigned int bytes,
* We never permit direct access to that register.
*/
if ( (port == 0xcf8) && (bytes == 4) )
- return 0;
+ return false;
/* We also never permit direct access to the RTC/CMOS registers. */
if ( ((port & ~1) == RTC_PORT(0)) )
- return 0;
+ return false;
return ioports_access_permitted(d, port, port + bytes - 1);
}
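(Note in passing, since the RTC check reads a little cryptically:
(port & ~1) == RTC_PORT(0) clears bit 0 of the port before comparing,
so a single comparison matches both the RTC index port and the data
port one above it, i.e. 0x70 and 0x71 assuming the usual
RTC_PORT(x) == 0x70 + (x) definition.)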
@@ -416,10 +418,10 @@ static bool pci_cfg_ok(struct domain *currd, unsigned int start,
uint32_t machine_bdf;
if ( !is_hardware_domain(currd) )
- return 0;
+ return false;
if ( !CF8_ENABLED(currd->arch.pci_cf8) )
- return 1;
+ return true;
machine_bdf = CF8_BDF(currd->arch.pci_cf8);
if ( write )
@@ -427,7 +429,7 @@ static bool pci_cfg_ok(struct domain *currd, unsigned int start,
const unsigned long *ro_map = pci_get_ro_map(0);
if ( ro_map && test_bit(machine_bdf, ro_map) )
- return 0;
+ return false;
}
start |= CF8_ADDR_LO(currd->arch.pci_cf8);
/* AMD extended configuration space access? */
@@ -438,7 +440,7 @@ static bool pci_cfg_ok(struct domain *currd, unsigned int start,
uint64_t msr_val;
if ( rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) )
- return 0;
+ return false;
if ( msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT) )
start |= CF8_ADDR_HI(currd->arch.pci_cf8);
}
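(For context on the 0xcf8 handling: a tiny sketch of the config-address
decode that pci_cfg_ok() relies on.  The masks mirror what I believe
the CF8_* helpers expand to; treat them as illustrative and check the
real definitions before depending on them.)

#include <stdint.h>
#include <stdio.h>

#define CF8_ENABLED(cf8)  (!!((cf8) & 0x80000000u))     /* bit 31: enable */
#define CF8_BDF(cf8)      (((cf8) & 0x00ffff00u) >> 8)  /* bus/dev/fn */
#define CF8_ADDR_LO(cf8)  ((cf8) & 0x000000fcu)         /* dword-aligned reg */
#define CF8_ADDR_HI(cf8)  (((cf8) & 0x0f000000u) >> 16) /* AMD extended cfg */

int main(void)
{
    /* bus 0x1a, device 3, function 1, register 0x44 */
    uint32_t cf8 = 0x80000000u | (0x1au << 16) | (3u << 11) |
                   (1u << 8) | 0x44;

    printf("enabled=%u bdf=%04x reg=%#x ext=%#x\n",
           CF8_ENABLED(cf8), CF8_BDF(cf8),
           CF8_ADDR_LO(cf8), CF8_ADDR_HI(cf8));

    return 0;
}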
@@ -835,7 +837,7 @@ static int priv_op_write_cr(unsigned int reg, unsigned long val,
if ( (val ^ read_cr0()) & ~X86_CR0_TS )
{
gdprintk(XENLOG_WARNING,
- "Attempt to change unmodifiable CR0 flags\n");
+ "Attempt to change unmodifiable CR0 flags\n");
break;
}
do_fpu_taskswitch(!!(val & X86_CR0_TS));
@@ -948,11 +950,11 @@ static int priv_op_read_msr(unsigned int reg, uint64_t *val,
*val = curr->arch.pv_vcpu.gs_base_user;
return X86EMUL_OKAY;
- /*
- * In order to fully retain original behavior, defer calling
- * pv_soft_rdtsc() until after emulation. This may want/need to be
- * reconsidered.
- */
+ /*
+ * In order to fully retain original behavior, defer calling
+ * pv_soft_rdtsc() until after emulation. This may want/need to be
+ * reconsidered.
+ */
case MSR_IA32_TSC:
poc->tsc |= TSC_BASE;
goto normal;
@@ -1042,16 +1044,16 @@ static int priv_op_read_msr(unsigned int reg, uint64_t *val,
*val |= MSR_MISC_FEATURES_CPUID_FAULTING;
return X86EMUL_OKAY;
- case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
- case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
- case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
- case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ case MSR_P6_PERFCTR(0) ... MSR_P6_PERFCTR(7):
+ case MSR_P6_EVNTSEL(0) ... MSR_P6_EVNTSEL(3):
+ case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR2:
+ case MSR_CORE_PERF_FIXED_CTR_CTRL ... MSR_CORE_PERF_GLOBAL_OVF_CTRL:
if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
{
vpmu_msr = true;
/* fall through */
- case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
- case MSR_K7_EVNTSEL0...MSR_K7_PERFCTR3:
+ case MSR_AMD_FAM15H_EVNTSEL0 ... MSR_AMD_FAM15H_PERFCTR5:
+ case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
{
if ( vpmu_do_rdmsr(reg, val) )
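(For readers puzzled by the control flow in this switch: the AMD case
labels sit *inside* the if ( Intel ) block, so Intel-range MSRs set
vpmu_msr and fall through into the shared vPMU path, while AMD-range
MSRs jump straight past the vendor check.  A toy sketch with made-up
MSR numbers, using the same GCC case-range extension:)

#include <stdbool.h>
#include <stdio.h>

static void classify(unsigned int msr, bool intel)
{
    bool vpmu_msr = false;

    switch ( msr )
    {
    case 0x10 ... 0x1f:              /* stands in for the Intel ranges */
        if ( intel )
        {
            vpmu_msr = true;
            /* fall through */
    case 0x20 ... 0x2f:              /* stands in for the AMD ranges */
            if ( vpmu_msr || !intel )
                printf("%#x: vPMU path (vpmu_msr=%d)\n", msr, vpmu_msr);
        }
        break;

    default:
        printf("%#x: not a PMU MSR\n", msr);
        break;
    }
}

int main(void)
{
    classify(0x15, true);   /* Intel range on Intel: vpmu_msr set first */
    classify(0x25, false);  /* AMD range: jumps straight to the inner if */
    classify(0x15, false);  /* Intel range on AMD: skipped entirely */

    return 0;
}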
@@ -1249,15 +1251,15 @@ static int priv_op_write_msr(unsigned int reg, uint64_t val,
curr->arch.cpuid_faulting = !!(val & MSR_MISC_FEATURES_CPUID_FAULTING);
return X86EMUL_OKAY;
- case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
- case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
- case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
- case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ case MSR_P6_PERFCTR(0) ... MSR_P6_PERFCTR(7):
+ case MSR_P6_EVNTSEL(0) ... MSR_P6_EVNTSEL(3):
+ case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR2:
+ case MSR_CORE_PERF_FIXED_CTR_CTRL ... MSR_CORE_PERF_GLOBAL_OVF_CTRL:
if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
{
vpmu_msr = true;
- case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
- case MSR_K7_EVNTSEL0...MSR_K7_PERFCTR3:
+ case MSR_AMD_FAM15H_EVNTSEL0 ... MSR_AMD_FAM15H_PERFCTR5:
+ case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
{
if ( (vpmu_mode & XENPMU_MODE_ALL) &&
@@ -1542,7 +1544,6 @@ static int read_gate_descriptor(unsigned int gate_sel,
struct desc_struct desc;
const struct desc_struct *pdesc;
-
pdesc = (const struct desc_struct *)
(!(gate_sel & 4) ? GDT_VIRT_START(v) : LDT_VIRT_START(v))
+ (gate_sel >> 3);
@@ -1724,17 +1725,17 @@ void emulate_gate_op(struct cpu_user_regs *regs)
{
unsigned int ss, esp, *stkp;
int rc;
-#define push(item) do \
- { \
- --stkp; \
- esp -= 4; \
- rc = __put_user(item, stkp); \
- if ( rc ) \
- { \
- pv_inject_page_fault(PFEC_write_access, \
- (unsigned long)(stkp + 1) - rc); \
- return; \
- } \
+#define push(item) do \
+ { \
+ --stkp; \
+ esp -= 4; \
+ rc = __put_user(item, stkp); \
+ if ( rc ) \
+ { \
+ pv_inject_page_fault(PFEC_write_access, \
+ (unsigned long)(stkp + 1) - rc); \
+ return; \
+ } \
} while ( 0 )
if ( ((ar >> 13) & 3) < (regs->cs & 3) )
--
2.1.4