
[PATCH 1/3] introduce unaligned.h



Rather than open-coding commonly used constructs in yet more places when
pulling in zstd decompression support (and its xxhash prerequisite), pull
out the custom bits into a shared header, for the hypervisor build at
least; the tool stack and stubdom builds of libxenguest will still need
to be taken care of similarly. For now the new header is limited to x86,
where no custom logic is needed (since this is going to be used in init
code only, even alternatives patching to use MOVBE doesn't seem
worthwhile).

No change in generated code.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
IIRC use of include/asm-generic/ was disliked, hence the generic header
goes into include/xen/ instead.
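
As a usage illustration (not part of the patch), a consumer such as one
of the decompressors can read a misaligned little-endian field through
the new helpers as sketched below; the function name and field offset
are made up for the example:

    /* Hypothetical usage sketch, not taken from this series. */
    #include <xen/types.h>
    #include <asm/unaligned.h>

    static uint32_t example_block_size(const uint8_t *src)
    {
        /* src + 1 need not be 4-byte aligned; the helper copes. */
        return get_unaligned_le32(src + 1);
    }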

--- a/xen/common/lz4/defs.h
+++ b/xen/common/lz4/defs.h
@@ -10,18 +10,21 @@
 
 #ifdef __XEN__
 #include <asm/byteorder.h>
-#endif
+#include <asm/unaligned.h>
+#else
 
-static inline u16 INIT get_unaligned_le16(const void *p)
+static inline u16 get_unaligned_le16(const void *p)
 {
        return le16_to_cpup(p);
 }
 
-static inline u32 INIT get_unaligned_le32(const void *p)
+static inline u32 get_unaligned_le32(const void *p)
 {
        return le32_to_cpup(p);
 }
 
+#endif
+
 /*
  * Detects 64 bits mode
  */
--- a/xen/common/lzo.c
+++ b/xen/common/lzo.c
@@ -97,13 +97,12 @@
 #ifdef __XEN__
 #include <xen/lib.h>
 #include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#else
+#define get_unaligned_le16(_p) (*(u16 *)(_p))
 #endif
 
 #include <xen/lzo.h>
-#define get_unaligned(_p) (*(_p))
-#define put_unaligned(_val,_p) (*(_p)=_val)
-#define get_unaligned_le16(_p) (*(u16 *)(_p))
-#define get_unaligned_le32(_p) (*(u32 *)(_p))
 
 #include "decompress.h"
 
--- a/xen/common/unlzo.c
+++ b/xen/common/unlzo.c
@@ -34,30 +34,19 @@
 
 #ifdef __XEN__
 #include <asm/byteorder.h>
-#endif
+#include <asm/unaligned.h>
+#else
 
-#if 1 /* ndef CONFIG_??? */
-static inline u16 INIT get_unaligned_be16(void *p)
+static inline u16 get_unaligned_be16(const void *p)
 {
        return be16_to_cpup(p);
 }
 
-static inline u32 INIT get_unaligned_be32(void *p)
+static inline u32 get_unaligned_be32(const void *p)
 {
        return be32_to_cpup(p);
 }
-#else
-#include <asm/unaligned.h>
-
-static inline u16 INIT get_unaligned_be16(void *p)
-{
-       return be16_to_cpu(__get_unaligned(p, 2));
-}
 
-static inline u32 INIT get_unaligned_be32(void *p)
-{
-       return be32_to_cpu(__get_unaligned(p, 4));
-}
 #endif
 
 static const unsigned char lzop_magic[] = {
--- a/xen/common/xz/private.h
+++ b/xen/common/xz/private.h
@@ -13,34 +13,23 @@
 #ifdef __XEN__
 #include <xen/kernel.h>
 #include <asm/byteorder.h>
-#endif
-
-#define get_le32(p) le32_to_cpup((const uint32_t *)(p))
+#include <asm/unaligned.h>
+#else
 
-#if 1 /* ndef CONFIG_??? */
-static inline u32 INIT get_unaligned_le32(void *p)
+static inline u32 get_unaligned_le32(const void *p)
 {
        return le32_to_cpup(p);
 }
 
-static inline void INIT put_unaligned_le32(u32 val, void *p)
+static inline void put_unaligned_le32(u32 val, void *p)
 {
        *(__force __le32*)p = cpu_to_le32(val);
 }
-#else
-#include <asm/unaligned.h>
-
-static inline u32 INIT get_unaligned_le32(void *p)
-{
-       return le32_to_cpu(__get_unaligned(p, 4));
-}
 
-static inline void INIT put_unaligned_le32(u32 val, void *p)
-{
-       __put_unaligned(cpu_to_le32(val), p, 4);
-}
 #endif
 
+#define get_le32(p) le32_to_cpup((const uint32_t *)(p))
+
 #define false 0
 #define true 1
 
--- /dev/null
+++ b/xen/include/asm-x86/unaligned.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_UNALIGNED_H__
+#define __ASM_UNALIGNED_H__
+
+#include <xen/unaligned.h>
+
+#endif /* __ASM_UNALIGNED_H__ */
--- /dev/null
+++ b/xen/include/xen/unaligned.h
@@ -0,0 +1,79 @@
+/*
+ * This header can be used by architectures where unaligned accesses work
+ * without faulting, and at least reasonably efficiently.  Other architectures
+ * will need to have a custom asm/unaligned.h.
+ */
+#ifndef __ASM_UNALIGNED_H__
+#error "xen/unaligned.h should not be included directly - include 
asm/unaligned.h instead"
+#endif
+
+#ifndef __XEN_UNALIGNED_H__
+#define __XEN_UNALIGNED_H__
+
+#include <xen/types.h>
+#include <asm/byteorder.h>
+
+#define get_unaligned(p) (*(p))
+#define put_unaligned(val, p) (*(p) = (val))
+
+static inline uint16_t get_unaligned_be16(const void *p)
+{
+       return be16_to_cpup(p);
+}
+
+static inline void put_unaligned_be16(uint16_t val, void *p)
+{
+       *(__force __be16*)p = cpu_to_be16(val);
+}
+
+static inline uint32_t get_unaligned_be32(const void *p)
+{
+       return be32_to_cpup(p);
+}
+
+static inline void put_unaligned_be32(uint32_t val, void *p)
+{
+       *(__force __be32*)p = cpu_to_be32(val);
+}
+
+static inline uint64_t get_unaligned_be64(const void *p)
+{
+       return be64_to_cpup(p);
+}
+
+static inline void put_unaligned_be64(uint64_t val, void *p)
+{
+       *(__force __be64*)p = cpu_to_be64(val);
+}
+
+static inline uint16_t get_unaligned_le16(const void *p)
+{
+       return le16_to_cpup(p);
+}
+
+static inline void put_unaligned_le16(uint16_t val, void *p)
+{
+       *(__force __le16*)p = cpu_to_le16(val);
+}
+
+static inline uint32_t get_unaligned_le32(const void *p)
+{
+       return le32_to_cpup(p);
+}
+
+static inline void put_unaligned_le32(uint32_t val, void *p)
+{
+       *(__force __le32*)p = cpu_to_le32(val);
+}
+
+static inline uint64_t get_unaligned_le64(const void *p)
+{
+       return le64_to_cpup(p);
+}
+
+static inline void put_unaligned_le64(uint64_t val, void *p)
+{
+       *(__force __le64*)p = cpu_to_le64(val);
+}
+
+#endif /* __XEN_UNALIGNED_H__ */
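
For reference, on an architecture where unaligned accesses fault, the
comment at the top of xen/unaligned.h calls for a custom
asm/unaligned.h. A minimal sketch of what such accessors might look
like (an assumption, not part of this series) is to go through
memcpy(), which the compiler can lower to whatever access pattern is
safe for the target:

    /* Sketch only, not part of the patch: strict-alignment fallback. */
    #include <xen/string.h>
    #include <xen/types.h>
    #include <asm/byteorder.h>

    static inline uint32_t get_unaligned_le32(const void *p)
    {
        __le32 v;

        /* Byte-wise copy avoids a potentially faulting 32-bit load. */
        memcpy(&v, p, sizeof(v));
        return le32_to_cpu(v);
    }

    static inline void put_unaligned_le32(uint32_t val, void *p)
    {
        __le32 v = cpu_to_le32(val);

        memcpy(p, &v, sizeof(v));
    }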