x86: re-order struct arch_domain fields

... to reduce padding holes. While doing this I noticed vtsc_usercount
is a PV-only thing, so it gets moved straight to struct pv_domain.

Signed-off-by: Jan Beulich

--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1767,7 +1767,7 @@ void pv_soft_rdtsc(struct vcpu *v, struc
     if ( guest_kernel_mode(v, regs) )
         d->arch.vtsc_kerncount++;
     else
-        d->arch.vtsc_usercount++;
+        d->arch.pv_domain.vtsc_usercount++;
 
     if ( (int64_t)(now - d->arch.vtsc_last) > 0 )
         d->arch.vtsc_last = now;
@@ -2020,17 +2020,15 @@ static void dump_softtsc(unsigned char k
             printk(",khz=%"PRIu32, d->arch.tsc_khz);
         if ( d->arch.incarnation )
             printk(",inc=%"PRIu32, d->arch.incarnation);
-        if ( !(d->arch.vtsc_kerncount | d->arch.vtsc_usercount) )
-        {
-            printk("\n");
-            continue;
-        }
-        if ( is_hvm_domain(d) )
+        if ( is_hvm_domain(d) && d->arch.vtsc_kerncount )
             printk(",vtsc count: %"PRIu64" total\n",
                    d->arch.vtsc_kerncount);
-        else
+        else if ( is_pv_domain(d) &&
+                  (d->arch.vtsc_kerncount | d->arch.pv_domain.vtsc_usercount) )
             printk(",vtsc count: %"PRIu64" kernel, %"PRIu64" user\n",
-                   d->arch.vtsc_kerncount, d->arch.vtsc_usercount);
+                   d->arch.vtsc_kerncount, d->arch.pv_domain.vtsc_usercount);
+        else
+            printk("\n");
         domcnt++;
     }
 
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -242,6 +242,8 @@ struct pv_domain
 
     /* map_domain_page() mapping cache. */
     struct mapcache_domain mapcache;
+
+    uint64_t vtsc_usercount; /* not used for hvm */
 };
 
 struct arch_domain
@@ -250,13 +252,16 @@ struct arch_domain
 
     unsigned int hv_compat_vstart;
 
-    bool_t s3_integrity;
+    /* Maximum physical-address bitwidth supported by this guest. */
+    unsigned int physaddr_bitsize;
 
     /* I/O-port admin-specified access capabilities. */
     struct rangeset *ioport_caps;
     uint32_t pci_cf8;
     uint8_t cmos_idx;
 
+    bool_t s3_integrity;
+
     struct list_head pdev_list;
 
     union {
@@ -270,6 +275,18 @@ struct arch_domain
      * page_alloc lock */
     int page_alloc_unlock_level;
 
+    /* Continuable domain_relinquish_resources(). */
+    enum {
+        RELMEM_not_started,
+        RELMEM_shared,
+        RELMEM_xen,
+        RELMEM_l4,
+        RELMEM_l3,
+        RELMEM_l2,
+        RELMEM_done,
+    } relmem;
+    struct page_list_head relmem_list;
+
     /* nestedhvm: translate l2 guest physical to host physical */
     struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
     mm_lock_t nested_p2m_lock;
@@ -277,27 +294,16 @@ struct arch_domain
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
     struct radix_tree_root irq_pirq;
 
-    /* Maximum physical-address bitwidth supported by this guest. */
-    unsigned int physaddr_bitsize;
-
     /* Is a 32-bit PV (non-HVM) guest? */
     bool_t is_32bit_pv;
     /* Is shared-info page in 32-bit format? */
     bool_t has_32bit_shinfo;
+
     /* Domain cannot handle spurious page faults? */
     bool_t suppress_spurious_page_faults;
 
-    /* Continuable domain_relinquish_resources(). */
-    enum {
-        RELMEM_not_started,
-        RELMEM_shared,
-        RELMEM_xen,
-        RELMEM_l4,
-        RELMEM_l3,
-        RELMEM_l2,
-        RELMEM_done,
-    } relmem;
-    struct page_list_head relmem_list;
+    /* Is PHYSDEVOP_eoi to automatically unmask the event channel? */
+    bool_t auto_unmask;
 
     cpuid_input_t *cpuids;
 
@@ -315,22 +321,18 @@ struct arch_domain
     uint32_t incarnation;    /* incremented every restore or live migrate
                                 (possibly other cases in the future */
     uint64_t vtsc_kerncount; /* for hvm, counts all vtsc */
-    uint64_t vtsc_usercount; /* not used for hvm */
 
     /* Pseudophysical e820 map (XENMEM_memory_map). */
     spinlock_t e820_lock;
     struct e820entry *e820;
     unsigned int nr_e820;
 
-    /* set auto_unmask to 1 if you want PHYSDEVOP_eoi to automatically
-     * unmask the event channel */
-    bool_t auto_unmask;
+    unsigned int psr_rmid; /* RMID assigned to the domain for CMT */
+
     /* Shared page for notifying that explicit PIRQ EOI is required. */
     unsigned long *pirq_eoi_map;
     unsigned long pirq_eoi_map_mfn;
-
-    unsigned int psr_rmid; /* RMID assigned to the domain for CMT */
-} __cacheline_aligned;
+};
 
 #define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list))
 
@@ -486,7 +488,7 @@ struct arch_vcpu
         unsigned long eip;
     } mem_event;
 
-} __cacheline_aligned;
+};
 
 smap_check_policy_t smap_policy_change(struct vcpu *v,
                                        smap_check_policy_t new_policy);
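
For illustration only, not part of the patch: the padding holes this
re-ordering targets come from the compiler aligning each field to its
natural boundary. A minimal standalone sketch, assuming x86-64 (LP64)
alignment rules and using made-up field names:

#include <stdint.h>
#include <stdio.h>

/* Narrow fields interleaved with wide ones leave alignment holes. */
struct holey {
    uint8_t  a;              /* 1 byte + 7 bytes padding to align 'b' */
    uint64_t b;              /* 8-byte aligned */
    uint8_t  c;              /* 1 byte + 7 bytes tail padding */
};                           /* sizeof() == 24 */

/* Same fields, widest first: the narrow members share one slot. */
struct reordered {
    uint64_t b;
    uint8_t  a;
    uint8_t  c;              /* only 6 bytes tail padding remain */
};                           /* sizeof() == 16 */

int main(void)
{
    printf("holey: %zu bytes, reordered: %zu bytes\n",
           sizeof(struct holey), sizeof(struct reordered));
    return 0;
}

The patch applies the same idea at a larger scale: grouping the bool_t
and unsigned int members of struct arch_domain together, rather than
scattering them between pointer- and uint64_t-sized fields, lets
adjacent narrow fields pack into holes the old layout wasted.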