author     Kim Phillips <kim.phillips@freescale.com>    2012-10-29 13:34:23 +0000
committer  Tom Rini <trini@ti.com>                      2012-11-04 11:00:34 -0700
commit     eef1cf2d5cf1cae5fb76713e912263dedf110aeb (patch)
tree       642348502001a6efaffd9e265e2de2e91d7316ac /include/linux
parent     f9b55e22856a97523074f3dc40ea5d196298756a (diff)
include/linux/byteorder: import latest endian definitions from linux
u-boot's byteorder headers did not contain endianness attributions for use with sparse, causing a lot of false positives. Import the kernel's latest definitions, and enable them by including compiler.h and types.h. They come with 'const' added for some swab functions, so fix those up, too:

  include/linux/byteorder/big_endian.h:46:2: warning: passing argument 1 of '__swab64p' discards 'const' qualifier from pointer target type [enabled by default]

Also note: u-boot's historic __BYTE_ORDER definition has been preserved (for the time being, at least).

We also remove ad-hoc barrier() definitions, since we're including compiler.h in files that hadn't in the past:

  macb.c:54:0: warning: "barrier" redefined [enabled by default]

In addition, including compiler.h in byteorder changes the 'noinline' definition to expand to __attribute__((noinline)). This fixes arch/powerpc/lib/bootm.c:

  bootm.c:329:16: error: attribute '__attribute__': unknown attribute
  bootm.c:329:16: error: expected ')' before '__attribute__'
  bootm.c:329:25: error: expected identifier or '(' before ')' token

powerpc sparse builds yield:

  include/common.h:356:22: error: marked inline, but without a definition

That inlining without a definition, declared for no stated reason, is considered obsolete: it was part of the 2002 initial commit, and no ARM version was ever 'fixed'.

Also fixed:

  ydirectenv.h:60:0: warning: "inline" redefined [enabled by default]

and:

  Configuring for devconcenter - Board: intip, Options: DEVCONCENTER
  make[1]: *** [4xx_ibm_ddr2_autocalib.o] Error 1
  make: *** [arch/powerpc/cpu/ppc4xx/libppc4xx.o] Error 2
  powerpc-fsl-linux-size: './u-boot': No such file
  4xx_ibm_ddr2_autocalib.c: In function 'DQS_autocalibration':
  include/asm/ppc4xx-sdram.h:1407:13: sorry, unimplemented: inlining failed in call to 'ppc4xx_ibm_ddr2_register_dump': function body not available
  4xx_ibm_ddr2_autocalib.c:1243:32: sorry, unimplemented: called from here

and:

  In file included from crc32.c:50:0:
  crc32table.h:4:1: warning: implicit declaration of function '___constant_swab32' [-Wimplicit-function-declaration]
  crc32table.h:4:1: error: initializer element is not constant
  crc32table.h:4:1: error: (near initialization for 'crc32table_le[0]')

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
[trini: Remove '#endif' in include/common.h around setenv portion]
Signed-off-by: Tom Rini <trini@ti.com>
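For readers unfamiliar with sparse's endian checking, here is a minimal sketch (not part of this patch; the struct and field names are invented) of the class of bug the imported __le32/__be32 bitwise annotations let sparse report instead of silently accepting:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical descriptor with a little-endian on-wire field. */
struct demo_desc {
	__le32 len;
};

static __u32 demo_desc_len(const struct demo_desc *d)
{
	/* Correct: convert to CPU order first; sparse stays quiet. */
	return __le32_to_cpu(d->len);
	/*
	 * Returning d->len directly would mix the __le32 bitwise type
	 * with plain __u32, and a sparse build would warn about the
	 * missing conversion.
	 */
}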
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/byteorder/big_endian.h     | 135
-rw-r--r--  include/linux/byteorder/little_endian.h  | 133
-rw-r--r--  include/linux/byteorder/swab.h           |   6
3 files changed, 178 insertions, 96 deletions
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
index 19b0c86..aaf7757 100644
--- a/include/linux/byteorder/big_endian.h
+++ b/include/linux/byteorder/big_endian.h
@@ -7,63 +7,104 @@
#ifndef __BIG_ENDIAN_BITFIELD
#define __BIG_ENDIAN_BITFIELD
#endif
-#define __BYTE_ORDER __BIG_ENDIAN
+#define __BYTE_ORDER __BIG_ENDIAN
+#include <linux/compiler.h>
+#include <linux/types.h>
#include <linux/byteorder/swab.h>
-#define __constant_htonl(x) ((__u32)(x))
-#define __constant_ntohl(x) ((__u32)(x))
-#define __constant_htons(x) ((__u16)(x))
-#define __constant_ntohs(x) ((__u16)(x))
-#define __constant_cpu_to_le64(x) ___swab64((x))
-#define __constant_le64_to_cpu(x) ___swab64((x))
-#define __constant_cpu_to_le32(x) ___swab32((x))
-#define __constant_le32_to_cpu(x) ___swab32((x))
-#define __constant_cpu_to_le16(x) ___swab16((x))
-#define __constant_le16_to_cpu(x) ___swab16((x))
-#define __constant_cpu_to_be64(x) ((__u64)(x))
-#define __constant_be64_to_cpu(x) ((__u64)(x))
-#define __constant_cpu_to_be32(x) ((__u32)(x))
-#define __constant_be32_to_cpu(x) ((__u32)(x))
-#define __constant_cpu_to_be16(x) ((__u16)(x))
-#define __constant_be16_to_cpu(x) ((__u16)(x))
-#define __cpu_to_le64(x) __swab64((x))
-#define __le64_to_cpu(x) __swab64((x))
-#define __cpu_to_le32(x) __swab32((x))
-#define __le32_to_cpu(x) __swab32((x))
-#define __cpu_to_le16(x) __swab16((x))
-#define __le16_to_cpu(x) __swab16((x))
-#define __cpu_to_be64(x) ((__u64)(x))
-#define __be64_to_cpu(x) ((__u64)(x))
-#define __cpu_to_be32(x) ((__u32)(x))
-#define __be32_to_cpu(x) ((__u32)(x))
-#define __cpu_to_be16(x) ((__u16)(x))
-#define __be16_to_cpu(x) ((__u16)(x))
-#define __cpu_to_le64p(x) __swab64p((x))
-#define __le64_to_cpup(x) __swab64p((x))
-#define __cpu_to_le32p(x) __swab32p((x))
-#define __le32_to_cpup(x) __swab32p((x))
-#define __cpu_to_le16p(x) __swab16p((x))
-#define __le16_to_cpup(x) __swab16p((x))
-#define __cpu_to_be64p(x) (*(__u64*)(x))
-#define __be64_to_cpup(x) (*(__u64*)(x))
-#define __cpu_to_be32p(x) (*(__u32*)(x))
-#define __be32_to_cpup(x) (*(__u32*)(x))
-#define __cpu_to_be16p(x) (*(__u16*)(x))
-#define __be16_to_cpup(x) (*(__u16*)(x))
+#define __constant_htonl(x) ((__force __be32)(__u32)(x))
+#define __constant_ntohl(x) ((__force __u32)(__be32)(x))
+#define __constant_htons(x) ((__force __be16)(__u16)(x))
+#define __constant_ntohs(x) ((__force __u16)(__be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x)))
+#define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x)))
+#define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x)))
+#define __constant_le16_to_cpu(x) ___constant_swab16((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)(__u64)(x))
+#define __constant_be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)(__u32)(x))
+#define __constant_be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)(__u16)(x))
+#define __constant_be16_to_cpu(x) ((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)__swab64((x)))
+#define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)__swab32((x)))
+#define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)__swab16((x)))
+#define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
+#define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
+#define __be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
+#define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+ return (__force __le64)__swab64p(p);
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+ return __swab64p((__u64 *)p);
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+ return (__force __le32)__swab32p(p);
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+ return __swab32p((__u32 *)p);
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+ return (__force __le16)__swab16p(p);
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+ return __swab16p((__u16 *)p);
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+ return (__force __be64)*p;
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+ return (__force __u64)*p;
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+ return (__force __be32)*p;
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+ return (__force __u32)*p;
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+ return (__force __be16)*p;
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+ return (__force __u16)*p;
+}
#define __cpu_to_le64s(x) __swab64s((x))
#define __le64_to_cpus(x) __swab64s((x))
#define __cpu_to_le32s(x) __swab32s((x))
#define __le32_to_cpus(x) __swab32s((x))
#define __cpu_to_le16s(x) __swab16s((x))
#define __le16_to_cpus(x) __swab16s((x))
-#define __cpu_to_be64s(x) do {} while (0)
-#define __be64_to_cpus(x) do {} while (0)
-#define __cpu_to_be32s(x) do {} while (0)
-#define __be32_to_cpus(x) do {} while (0)
-#define __cpu_to_be16s(x) do {} while (0)
-#define __be16_to_cpus(x) do {} while (0)
+#define __cpu_to_be64s(x) do { (void)(x); } while (0)
+#define __be64_to_cpus(x) do { (void)(x); } while (0)
+#define __cpu_to_be32s(x) do { (void)(x); } while (0)
+#define __be32_to_cpus(x) do { (void)(x); } while (0)
+#define __cpu_to_be16s(x) do { (void)(x); } while (0)
+#define __be16_to_cpus(x) do { (void)(x); } while (0)
+#ifdef __KERNEL__
#include <linux/byteorder/generic.h>
+#endif
#endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */
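As a hedged illustration of why the pointer helpers above became inline functions taking const arguments (the function name below is invented, not from the patch): on a big-endian CPU, pulling a little-endian field out of a read-only buffer no longer triggers the "discards 'const' qualifier" warning quoted in the commit message.

/* Illustrative only: fetch a little-endian 32-bit field from a const buffer. */
static inline __u32 demo_read_le32(const __le32 *src)
{
	return __le32_to_cpup(src);	/* byte-swapped on big-endian CPUs */
}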
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
index a46f3ec..a4cb3bf 100644
--- a/include/linux/byteorder/little_endian.h
+++ b/include/linux/byteorder/little_endian.h
@@ -9,54 +9,93 @@
#endif
#define __BYTE_ORDER __LITTLE_ENDIAN
+#include <linux/compiler.h>
+#include <linux/types.h>
#include <linux/byteorder/swab.h>
-#define __constant_htonl(x) ___constant_swab32((x))
-#define __constant_ntohl(x) ___constant_swab32((x))
-#define __constant_htons(x) ___constant_swab16((x))
-#define __constant_ntohs(x) ___constant_swab16((x))
-#define __constant_cpu_to_le64(x) ((__u64)(x))
-#define __constant_le64_to_cpu(x) ((__u64)(x))
-#define __constant_cpu_to_le32(x) ((__u32)(x))
-#define __constant_le32_to_cpu(x) ((__u32)(x))
-#define __constant_cpu_to_le16(x) ((__u16)(x))
-#define __constant_le16_to_cpu(x) ((__u16)(x))
-#define __constant_cpu_to_be64(x) ___constant_swab64((x))
-#define __constant_be64_to_cpu(x) ___constant_swab64((x))
-#define __constant_cpu_to_be32(x) ___constant_swab32((x))
-#define __constant_be32_to_cpu(x) ___constant_swab32((x))
-#define __constant_cpu_to_be16(x) ___constant_swab16((x))
-#define __constant_be16_to_cpu(x) ___constant_swab16((x))
-#define __cpu_to_le64(x) ((__u64)(x))
-#define __le64_to_cpu(x) ((__u64)(x))
-#define __cpu_to_le32(x) ((__u32)(x))
-#define __le32_to_cpu(x) ((__u32)(x))
-#define __cpu_to_le16(x) ((__u16)(x))
-#define __le16_to_cpu(x) ((__u16)(x))
-#define __cpu_to_be64(x) __swab64((x))
-#define __be64_to_cpu(x) __swab64((x))
-#define __cpu_to_be32(x) __swab32((x))
-#define __be32_to_cpu(x) __swab32((x))
-#define __cpu_to_be16(x) __swab16((x))
-#define __be16_to_cpu(x) __swab16((x))
-#define __cpu_to_le64p(x) (*(__u64*)(x))
-#define __le64_to_cpup(x) (*(__u64*)(x))
-#define __cpu_to_le32p(x) (*(__u32*)(x))
-#define __le32_to_cpup(x) (*(__u32*)(x))
-#define __cpu_to_le16p(x) (*(__u16*)(x))
-#define __le16_to_cpup(x) (*(__u16*)(x))
-#define __cpu_to_be64p(x) __swab64p((x))
-#define __be64_to_cpup(x) __swab64p((x))
-#define __cpu_to_be32p(x) __swab32p((x))
-#define __be32_to_cpup(x) __swab32p((x))
-#define __cpu_to_be16p(x) __swab16p((x))
-#define __be16_to_cpup(x) __swab16p((x))
-#define __cpu_to_le64s(x) do {} while (0)
-#define __le64_to_cpus(x) do {} while (0)
-#define __cpu_to_le32s(x) do {} while (0)
-#define __le32_to_cpus(x) do {} while (0)
-#define __cpu_to_le16s(x) do {} while (0)
-#define __le16_to_cpus(x) do {} while (0)
+#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
+#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
+#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
+#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
+#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
+#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+ return (__force __le64)*p;
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+ return (__force __u64)*p;
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+ return (__force __le32)*p;
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+ return (__force __u32)*p;
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+ return (__force __le16)*p;
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+ return (__force __u16)*p;
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+ return (__force __be64)__swab64p(p);
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+ return __swab64p((__u64 *)p);
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+ return (__force __be32)__swab32p(p);
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+ return __swab32p((__u32 *)p);
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+ return (__force __be16)__swab16p(p);
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+ return __swab16p((__u16 *)p);
+}
+#define __cpu_to_le64s(x) do { (void)(x); } while (0)
+#define __le64_to_cpus(x) do { (void)(x); } while (0)
+#define __cpu_to_le32s(x) do { (void)(x); } while (0)
+#define __le32_to_cpus(x) do { (void)(x); } while (0)
+#define __cpu_to_le16s(x) do { (void)(x); } while (0)
+#define __le16_to_cpus(x) do { (void)(x); } while (0)
#define __cpu_to_be64s(x) __swab64s((x))
#define __be64_to_cpus(x) __swab64s((x))
#define __cpu_to_be32s(x) __swab32s((x))
@@ -64,6 +103,8 @@
#define __cpu_to_be16s(x) __swab16s((x))
#define __be16_to_cpus(x) __swab16s((x))
+#ifdef __KERNEL__
#include <linux/byteorder/generic.h>
+#endif
#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */
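One property worth noting, shown with a made-up value rather than code from the patch: the __constant_* forms expand to compile-time constant expressions, which is what allows file-scope initialisers such as the crc32table_le[] table quoted in the commit message to use ___constant_swab32().

/* Illustrative only: a compile-time big-endian constant on a little-endian build. */
static const __be32 demo_magic = __constant_cpu_to_be32(0x11223344);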
diff --git a/include/linux/byteorder/swab.h b/include/linux/byteorder/swab.h
index b1d570e..bb4a046 100644
--- a/include/linux/byteorder/swab.h
+++ b/include/linux/byteorder/swab.h
@@ -100,7 +100,7 @@ static __inline__ __attribute__((const)) __u16 __fswab16(__u16 x)
{
return __arch__swab16(x);
}
-static __inline__ __u16 __swab16p(__u16 *x)
+static __inline__ __u16 __swab16p(const __u16 *x)
{
return __arch__swab16p(x);
}
@@ -113,7 +113,7 @@ static __inline__ __attribute__((const)) __u32 __fswab32(__u32 x)
{
return __arch__swab32(x);
}
-static __inline__ __u32 __swab32p(__u32 *x)
+static __inline__ __u32 __swab32p(const __u32 *x)
{
return __arch__swab32p(x);
}
@@ -133,7 +133,7 @@ static __inline__ __attribute__((const)) __u64 __fswab64(__u64 x)
return __arch__swab64(x);
# endif
}
-static __inline__ __u64 __swab64p(__u64 *x)
+static __inline__ __u64 __swab64p(const __u64 *x)
{
return __arch__swab64p(x);
}
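Finally, a minimal usage sketch of the now const-qualified swab pointer helpers (the function name and argument are invented, not part of the patch):

/* Illustrative only: byte-swap a word read straight from a const buffer. */
static __u32 demo_first_word_swapped(const __u32 *p)
{
	return __swab32p(p);
}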