[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 09/16] x86: provide stubs, declarations and macros in hvm.h



Make sure hvm_enabled evaluates to false, then provide the necessary stubs,
declarations and macros to make Xen build when !CONFIG_HVM.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
v3: rewritten
---
 xen/include/asm-x86/hvm/hvm.h | 97 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)

diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 0c321409ee..28fd483a04 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -340,6 +340,9 @@ const char *hvm_efer_valid(const struct vcpu *v, uint64_t 
value,
                            signed int cr0_pg);
 unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore);
 
+
+#ifdef CONFIG_HVM
+
 #define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)
 
 #define hvm_tsc_scaling_supported \
@@ -675,6 +678,100 @@ static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
         d_->arch.hvm.pi_ops.vcpu_block(v_);                     \
 })
 
+#else  /* CONFIG_HVM */
+
+#define hvm_enabled false
+
+/*
+ * The following functions are inline in the CONFIG_HVM branch above.
+ * Here only declarations are needed: the compiler's dead code
+ * elimination removes all callers when !CONFIG_HVM.
+ */
+int hvm_guest_x86_mode(struct vcpu *v);
+unsigned long hvm_get_shadow_gs_base(struct vcpu *v);
+void hvm_set_info_guest(struct vcpu *v);
+void hvm_cpuid_policy_changed(struct vcpu *v);
+void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset, uint64_t at_tsc);
+
+static inline bool hvm_is_singlestep_supported(void)
+{
+    return false;
+}
+
+static inline bool hvm_hap_supported(void)
+{
+    return false;
+}
+
+static inline bool nhvm_vmcx_hap_enabled(struct vcpu *v)
+{
+    ASSERT_UNREACHABLE();
+    return false;
+}
+
+static inline int hvm_cpu_up(void)
+{
+    return 0;
+}
+
+static inline void hvm_cpu_down(void) {}
+
+static inline void hvm_flush_guest_tlbs(void) {}
+
+static inline void hvm_update_host_cr3(struct vcpu *v)
+{
+    ASSERT_UNREACHABLE();
+}
+
+static inline void hvm_update_guest_cr3(struct vcpu *v, bool noflush)
+{
+    ASSERT_UNREACHABLE();
+}
+
+static inline unsigned int hvm_get_cpl(struct vcpu *v)
+{
+    ASSERT_UNREACHABLE();
+    return -1;
+}
+
+static inline int hvm_event_pending(struct vcpu *v)
+{
+    return 0;
+}
+
+static inline void hvm_inject_hw_exception(unsigned int vector, int errcode)
+{
+    ASSERT_UNREACHABLE();
+}
+
+static inline void hvm_invlpg(struct vcpu *v, unsigned long linear)
+{
+    ASSERT_UNREACHABLE();
+}
+
+#define is_viridian_domain(d) ({(void)(d); false;})
+#define has_viridian_time_ref_count(d) ({(void)(d); false;})
+#define hvm_long_mode_active(v) ({(void)(v); false;})
+#define hvm_pae_enabled(v) ({(void)(v); false;})
+#define hvm_get_guest_time(v) ({(void)(v); 0;})
+
+#define hvm_tsc_scaling_supported false
+#define hap_has_1gb false
+#define hap_has_2mb false
+
+
+#define hvm_paging_enabled(v) ({(void)(v); false;})
+#define hvm_wp_enabled(v) ({(void)(v); false;})
+#define hvm_pcid_enabled(v) ({(void)(v); false;})
+
+#define hvm_smep_enabled(v) ({(void)(v); false;})
+#define hvm_smap_enabled(v) ({(void)(v); false;})
+#define hvm_nx_enabled(v) ({(void)(v); false;})
+#define hvm_pku_enabled(v) ({(void)(v); false;})
+
+#define arch_vcpu_block(v) ((void)(v))
+
+#endif  /* CONFIG_HVM */
+
 #endif /* __ASM_X86_HVM_HVM_H__ */
 
 /*
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.