[Xen-devel] [PATCH 1/3] vpmu: Use macros to access struct vpmu_struct.flags
xen/arch/x86/hvm/svm/vpmu.c | 30 +++++++++++++++---------------
xen/arch/x86/hvm/vmx/vpmu_core2.c | 32 ++++++++++++++++----------------
xen/arch/x86/hvm/vpmu.c | 4 ++--
xen/arch/x86/oprofile/nmi_int.c | 4 ++--
xen/arch/x86/oprofile/op_model_ppro.c | 10 +++++-----
xen/include/asm-x86/hvm/vpmu.h | 6 ++++++
6 files changed, 46 insertions(+), 40 deletions(-)
This patch introduces macros to access the 'flags' member of
struct vpmu_struct (see xen/include/asm-x86/hvm/vpmu.h).
Only individual bits within 'flags' are set, reset or checked.
Signed-off-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
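(Illustration for reviewers, not part of the patch: a minimal sketch of how
the new accessors replace the open-coded bit operations. example_stop() is a
hypothetical helper; the types and macros are the ones from
xen/include/asm-x86/hvm/vpmu.h.)

    /* Hypothetical helper, for illustration only. */
    static void example_stop(struct vcpu *v)
    {
        struct vpmu_struct *vpmu = vcpu_vpmu(v);

        /* Before: open-coded test-and-clear on vpmu->flags. */
        if ( vpmu->flags & VPMU_RUNNING )
            vpmu->flags &= ~VPMU_RUNNING;

        /* After: the same logic via the new accessors. */
        if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
            vpmu_reset(vpmu, VPMU_RUNNING);
    }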
diff -r eae25241d571 xen/arch/x86/hvm/svm/vpmu.c
--- a/xen/arch/x86/hvm/svm/vpmu.c Tue Feb 07 15:05:19 2012 +0100
+++ b/xen/arch/x86/hvm/svm/vpmu.c Wed Feb 08 11:41:48 2012 +0100
@@ -188,8 +188,8 @@ static void amd_vpmu_restore(struct vcpu
struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct amd_vpmu_context *ctxt = vpmu->context;
- if ( !((vpmu->flags & VPMU_CONTEXT_ALLOCATED) &&
- (vpmu->flags & VPMU_RUNNING)) )
+ if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+ vpmu_is_set(vpmu, VPMU_RUNNING)) )
return;
context_restore(v);
@@ -214,8 +214,8 @@ static void amd_vpmu_save(struct vcpu *v
struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct amd_vpmu_context *ctx = vpmu->context;
- if ( !((vpmu->flags & VPMU_CONTEXT_ALLOCATED) &&
- (vpmu->flags & VPMU_RUNNING)) )
+ if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+ vpmu_is_set(vpmu, VPMU_RUNNING)) )
return;
context_save(v);
@@ -261,20 +261,20 @@ static int amd_vpmu_do_wrmsr(unsigned in
/* check if the first counter is enabled */
if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
- is_pmu_enabled(msr_content) && !(vpmu->flags & VPMU_RUNNING) )
+ is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
{
if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
return 1;
- vpmu->flags |= VPMU_RUNNING;
+ vpmu_set(vpmu, VPMU_RUNNING);
apic_write(APIC_LVTPC, PMU_APIC_VECTOR);
}
/* stop saving & restore if guest stops first counter */
- if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
- (is_pmu_enabled(msr_content) == 0) && (vpmu->flags & VPMU_RUNNING) )
+ if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
+ (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
{
apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
- vpmu->flags &= ~VPMU_RUNNING;
+ vpmu_reset(vpmu, VPMU_RUNNING);
release_pmu_ownship(PMU_OWNER_HVM);
}
@@ -298,7 +298,7 @@ static void amd_vpmu_initialise(struct v
struct vpmu_struct *vpmu = vcpu_vpmu(v);
uint8_t family = current_cpu_data.x86;
- if ( vpmu->flags & VPMU_CONTEXT_ALLOCATED )
+ if ( vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
return;
if ( counters == NULL )
@@ -333,22 +333,22 @@ static void amd_vpmu_initialise(struct v
}
vpmu->context = (void *)ctxt;
- vpmu->flags |= VPMU_CONTEXT_ALLOCATED;
+ vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
}
static void amd_vpmu_destroy(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( !(vpmu->flags & VPMU_CONTEXT_ALLOCATED) )
+ if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
return;
xfree(vpmu->context);
- vpmu->flags &= ~VPMU_CONTEXT_ALLOCATED;
+ vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
- if ( vpmu->flags & VPMU_RUNNING )
+ if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
{
- vpmu->flags &= ~VPMU_RUNNING;
+ vpmu_reset(vpmu, VPMU_RUNNING);
release_pmu_ownship(PMU_OWNER_HVM);
}
}
diff -r eae25241d571 xen/arch/x86/hvm/vmx/vpmu_core2.c
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Tue Feb 07 15:05:19 2012 +0100
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Wed Feb 08 11:41:48 2012 +0100
@@ -266,17 +266,17 @@ static void core2_vpmu_save(struct vcpu
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( !((vpmu->flags & VPMU_CONTEXT_ALLOCATED) &&
- (vpmu->flags & VPMU_CONTEXT_LOADED)) )
+ if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+ vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
return;
__core2_vpmu_save(v);
/* Unset PMU MSR bitmap to trap lazy load. */
- if ( !(vpmu->flags & VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
+ if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
- vpmu->flags &= ~VPMU_CONTEXT_LOADED;
+ vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
return;
}
@@ -303,11 +303,11 @@ static void core2_vpmu_load(struct vcpu
struct vpmu_struct *vpmu = vcpu_vpmu(v);
/* Only when PMU is counting, we load PMU context immediately. */
- if ( !((vpmu->flags & VPMU_CONTEXT_ALLOCATED) &&
- (vpmu->flags & VPMU_RUNNING)) )
+ if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+ vpmu_is_set(vpmu, VPMU_RUNNING)) )
return;
__core2_vpmu_load(v);
- vpmu->flags |= VPMU_CONTEXT_LOADED;
+ vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
}
static int core2_vpmu_alloc_resource(struct vcpu *v)
@@ -373,17 +373,17 @@ static int core2_vpmu_msr_common_check(u
if ( !is_core2_vpmu_msr(msr_index, type, index) )
return 0;
- if ( unlikely(!(vpmu->flags & VPMU_CONTEXT_ALLOCATED)) &&
+ if ( unlikely(!vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED)) &&
(vpmu->context != NULL ||
!core2_vpmu_alloc_resource(current)) )
return 0;
- vpmu->flags |= VPMU_CONTEXT_ALLOCATED;
+ vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
/* Do the lazy load staff. */
- if ( !(vpmu->flags & VPMU_CONTEXT_LOADED) )
+ if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
{
__core2_vpmu_load(current);
- vpmu->flags |= VPMU_CONTEXT_LOADED;
+ vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
if ( cpu_has_vmx_msr_bitmap )
core2_vpmu_set_msr_bitmap(current->arch.hvm_vmx.msr_bitmap);
}
@@ -467,12 +467,12 @@ static int core2_vpmu_do_wrmsr(unsigned
for ( i = 0; i < core2_get_pmc_count(); i++ )
pmu_enable |= core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i];
if ( pmu_enable )
- vpmu->flags |= VPMU_RUNNING;
+ vpmu_set(vpmu, VPMU_RUNNING);
else
- vpmu->flags &= ~VPMU_RUNNING;
+ vpmu_reset(vpmu, VPMU_RUNNING);
/* Setup LVTPC in local apic */
- if ( vpmu->flags & VPMU_RUNNING &&
+ if ( vpmu_is_set(vpmu, VPMU_RUNNING) &&
is_vlapic_lvtpc_enabled(vcpu_vlapic(v)) )
apic_write_around(APIC_LVTPC, PMU_APIC_VECTOR);
else
@@ -588,14 +588,14 @@ static void core2_vpmu_destroy(struct vc
struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
- if ( !(vpmu->flags & VPMU_CONTEXT_ALLOCATED) )
+ if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
return;
xfree(core2_vpmu_cxt->pmu_enable);
xfree(vpmu->context);
if ( cpu_has_vmx_msr_bitmap )
core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
release_pmu_ownship(PMU_OWNER_HVM);
- vpmu->flags &= ~VPMU_CONTEXT_ALLOCATED;
+ vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
}
struct arch_vpmu_ops core2_vpmu_ops = {
diff -r eae25241d571 xen/arch/x86/hvm/vpmu.c
--- a/xen/arch/x86/hvm/vpmu.c Tue Feb 07 15:05:19 2012 +0100
+++ b/xen/arch/x86/hvm/vpmu.c Wed Feb 08 11:41:48 2012 +0100
@@ -86,7 +86,7 @@ void vpmu_initialise(struct vcpu *v)
if ( !opt_vpmu_enabled )
return;
- if ( vpmu->flags & VPMU_CONTEXT_ALLOCATED )
+ if ( vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
vpmu_destroy(v);
switch ( vendor )
@@ -110,7 +110,7 @@ void vpmu_initialise(struct vcpu *v)
if ( vpmu->arch_vpmu_ops != NULL )
{
- vpmu->flags = 0;
+ vpmu_clear(vpmu);
vpmu->context = NULL;
vpmu->arch_vpmu_ops->arch_vpmu_initialise(v);
}
diff -r eae25241d571 xen/arch/x86/oprofile/nmi_int.c
--- a/xen/arch/x86/oprofile/nmi_int.c Tue Feb 07 15:05:19 2012 +0100
+++ b/xen/arch/x86/oprofile/nmi_int.c Wed Feb 08 11:41:48 2012 +0100
@@ -47,7 +47,7 @@ static int passive_domain_msr_op_checks(
if ( !model->is_arch_pmu_msr(msr, typep, indexp) )
return 0;
- if ( !(vpmu->flags & PASSIVE_DOMAIN_ALLOCATED) )
+ if ( !vpmu_is_set(vpmu, PASSIVE_DOMAIN_ALLOCATED) )
if ( ! model->allocated_msr(current) )
return 0;
return 1;
@@ -78,7 +78,7 @@ int passive_domain_do_wrmsr(unsigned int
void passive_domain_destroy(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( vpmu->flags & PASSIVE_DOMAIN_ALLOCATED )
+ if ( vpmu_is_set(vpmu, PASSIVE_DOMAIN_ALLOCATED) )
model->free_msr(v);
}
diff -r eae25241d571 xen/arch/x86/oprofile/op_model_ppro.c
--- a/xen/arch/x86/oprofile/op_model_ppro.c Tue Feb 07 15:05:19 2012 +0100
+++ b/xen/arch/x86/oprofile/op_model_ppro.c Wed Feb 08 11:41:48 2012 +0100
@@ -143,7 +143,7 @@ static int ppro_check_ctrs(unsigned int
xenoprof_log_event(current, regs, eip, mode, i);
wrmsrl(msrs->counters[i].addr, -reset_value[i]);
if ( is_passive(current->domain) && (mode != 2) &&
- (vcpu_vpmu(current)->flags & PASSIVE_DOMAIN_ALLOCATED) )
+ vpmu_is_set(vcpu_vpmu(current), PASSIVE_DOMAIN_ALLOCATED) )
{
if ( IS_ACTIVE(msrs_content[i].control) )
{
@@ -230,8 +230,8 @@ static int ppro_allocate_msr(struct vcpu
if ( !msr_content )
goto out;
vpmu->context = (void *)msr_content;
- vpmu->flags = 0;
- vpmu->flags |= PASSIVE_DOMAIN_ALLOCATED;
+ vpmu_clear(vpmu);
+ vpmu_set(vpmu, PASSIVE_DOMAIN_ALLOCATED);
return 1;
out:
gdprintk(XENLOG_WARNING, "Insufficient memory for oprofile, oprofile is "
@@ -244,10 +244,10 @@ static void ppro_free_msr(struct vcpu *v
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( !(vpmu->flags & PASSIVE_DOMAIN_ALLOCATED) )
+ if ( !vpmu_is_set(vpmu, PASSIVE_DOMAIN_ALLOCATED) )
return;
xfree(vpmu->context);
- vpmu->flags &= ~PASSIVE_DOMAIN_ALLOCATED;
+ vpmu_reset(vpmu, PASSIVE_DOMAIN_ALLOCATED);
}
static void ppro_load_msr(struct vcpu *v, int type, int index, u64 *msr_content)
diff -r eae25241d571 xen/include/asm-x86/hvm/vpmu.h
--- a/xen/include/asm-x86/hvm/vpmu.h Tue Feb 07 15:05:19 2012 +0100
+++ b/xen/include/asm-x86/hvm/vpmu.h Wed Feb 08 11:41:48 2012 +0100
@@ -69,6 +69,12 @@ struct vpmu_struct {
#define VPMU_CONTEXT_LOADED 0x2
#define VPMU_RUNNING 0x4
#define PASSIVE_DOMAIN_ALLOCATED 0x8
+
+#define vpmu_set(_vpmu, _x) ((_vpmu)->flags |= (_x))
+#define vpmu_reset(_vpmu, _x) ((_vpmu)->flags &= ~(_x))
+#define vpmu_is_set(_vpmu, _x) ((_vpmu)->flags & (_x))
+#define vpmu_clear(_vpmu) ((_vpmu)->flags = 0)
+
int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content);
int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
int vpmu_do_interrupt(struct cpu_user_regs *regs);
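(An observation, not part of the patch: since the macros take a plain bit
mask, several flags can be handled in one call. Note that vpmu_is_set() with
a combined mask yields non-zero if ANY of the bits is set, not all of them;
do_something() below is a hypothetical placeholder.)

    /* Clears both bits at once: flags &= ~(VPMU_RUNNING | VPMU_CONTEXT_LOADED). */
    vpmu_reset(vpmu, VPMU_RUNNING | VPMU_CONTEXT_LOADED);

    /* Non-zero if at least one of the two bits is set. */
    if ( vpmu_is_set(vpmu, VPMU_RUNNING | VPMU_CONTEXT_LOADED) )
        do_something();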
--
Company details: http://ts.fujitsu.com/imprint.html