commit 25793f76bf9a7be59c9415ef0f78d034e8d53dae
tree   4063b6e96fdc73e0f05eafd04c3f6c020c9044e2
parent b91b8f74fe9ded18344c3d03080a4abc07254502
author    Remy Bohmer <linux@bohmer.net>     2009-10-29 12:29:37 +0100
committer Tom Rix <Tom.Rix@windriver.com>    2009-11-07 15:56:30 -0600
ARM: Use Linux version for unaligned access code
The asm-arm/unaligned.h header includes linux/unaligned/access_ok.h.
That file is unsafe to use on ARM, since it performs unaligned memory
accesses, which fail on ARM.
Looking at Linux, the basic difference is in the header
"include/asm-arm/unaligned.h": the Linux version of "unaligned.h"
does *not* include "access_ok.h" at all. It includes "le_byteshift.h"
and "be_byteshift.h" instead, which assemble the values one byte at a time.
Signed-off-by: Remy Bohmer <linux@bohmer.net>
Signed-off-by: Stefan Roese <sr@denx.de>
---
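Note (not part of the original patch): a minimal standalone sketch of the
byte-shift technique the new headers rely on. Every access is a single byte
load, so the alignment of the pointer never matters. The sketch uses plain
<stdint.h> types instead of the kernel's u8/u32 typedefs, and the
sketch_* name is made up to avoid clashing with the real accessors defined
in be_byteshift.h below.

#include <stdint.h>
#include <stdio.h>

/* Read a 32-bit big-endian value from a possibly unaligned pointer,
 * one byte at a time (same idea as __get_unaligned_be32() below). */
static inline uint32_t sketch_get_unaligned_be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8  | p[3];
}

int main(void)
{
	/* The 32-bit value deliberately starts at an odd, unaligned offset. */
	uint8_t buf[8] = { 0x00, 0x12, 0x34, 0x56, 0x78, 0x00, 0x00, 0x00 };

	/* A direct *(uint32_t *)(buf + 1) load may trap or return rotated
	 * data on ARM; the byte-shift read below is always well defined. */
	printf("0x%08x\n", (unsigned)sketch_get_unaligned_be32(buf + 1));
	return 0;
}

This prints 0x12345678 regardless of the buffer's alignment or the host's
endianness.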
 include/asm-arm/unaligned.h            |    3 ++-
include/linux/unaligned/be_byteshift.h | 70 +++++++++++++++++++++++++++++++++
include/linux/unaligned/le_byteshift.h | 70 +++++++++++++++++++++++++++++++++
3 files changed, 142 insertions(+), 1 deletion(-)
create mode 100644 include/linux/unaligned/be_byteshift.h
create mode 100644 include/linux/unaligned/le_byteshift.h
diff --git a/include/asm-arm/unaligned.h b/include/asm-arm/unaligned.h
index d644df7..44593a8 100644
--- a/include/asm-arm/unaligned.h
+++ b/include/asm-arm/unaligned.h
@@ -1,7 +1,8 @@
 #ifndef _ASM_ARM_UNALIGNED_H
 #define _ASM_ARM_UNALIGNED_H
 
-#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/be_byteshift.h>
 #include <linux/unaligned/generic.h>
 
 /*
diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h
new file mode 100644
index 0000000..9356b24
--- /dev/null
+++ b/include/linux/unaligned/be_byteshift.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H
+#define _LINUX_UNALIGNED_BE_BYTESHIFT_H
+
+#include <linux/types.h>
+
+static inline u16 __get_unaligned_be16(const u8 *p)
+{
+	return p[0] << 8 | p[1];
+}
+
+static inline u32 __get_unaligned_be32(const u8 *p)
+{
+	return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
+}
+
+static inline u64 __get_unaligned_be64(const u8 *p)
+{
+	return (u64)__get_unaligned_be32(p) << 32 |
+	       __get_unaligned_be32(p + 4);
+}
+
+static inline void __put_unaligned_be16(u16 val, u8 *p)
+{
+	*p++ = val >> 8;
+	*p++ = val;
+}
+
+static inline void __put_unaligned_be32(u32 val, u8 *p)
+{
+	__put_unaligned_be16(val >> 16, p);
+	__put_unaligned_be16(val, p + 2);
+}
+
+static inline void __put_unaligned_be64(u64 val, u8 *p)
+{
+	__put_unaligned_be32(val >> 32, p);
+	__put_unaligned_be32(val, p + 4);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return __get_unaligned_be16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return __get_unaligned_be32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return __get_unaligned_be64((const u8 *)p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_be16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_be32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_be64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */
diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h
new file mode 100644
index 0000000..be376fb
--- /dev/null
+++ b/include/linux/unaligned/le_byteshift.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H
+#define _LINUX_UNALIGNED_LE_BYTESHIFT_H
+
+#include <linux/types.h>
+
+static inline u16 __get_unaligned_le16(const u8 *p)
+{
+	return p[0] | p[1] << 8;
+}
+
+static inline u32 __get_unaligned_le32(const u8 *p)
+{
+	return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
+}
+
+static inline u64 __get_unaligned_le64(const u8 *p)
+{
+	return (u64)__get_unaligned_le32(p + 4) << 32 |
+	       __get_unaligned_le32(p);
+}
+
+static inline void __put_unaligned_le16(u16 val, u8 *p)
+{
+	*p++ = val;
+	*p++ = val >> 8;
+}
+
+static inline void __put_unaligned_le32(u32 val, u8 *p)
+{
+	__put_unaligned_le16(val >> 16, p + 2);
+	__put_unaligned_le16(val, p);
+}
+
+static inline void __put_unaligned_le64(u64 val, u8 *p)
+{
+	__put_unaligned_le32(val >> 32, p + 4);
+	__put_unaligned_le32(val, p);
+}
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return __get_unaligned_le16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return __get_unaligned_le32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return __get_unaligned_le64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_le16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_le32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_le64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */
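For reference, a hedged usage sketch (not taken from the tree) of how U-Boot
code would typically consume these accessors once <asm/unaligned.h> pulls in
the byte-shift headers. The function names and field offsets below are made
up for illustration; only get_unaligned_be16()/put_unaligned_le32() and the
u8/u32 types come from the headers added above.

#include <asm/unaligned.h>

/* Read a 16-bit big-endian length field that may sit at any byte
 * offset inside a received packet buffer (hypothetical offset 2). */
static int example_packet_len(const void *pkt)
{
	return get_unaligned_be16((const u8 *)pkt + 2);
}

/* Store a 32-bit little-endian address into a descriptor without
 * assuming the descriptor is 4-byte aligned (hypothetical offset 4). */
static void example_set_desc_addr(void *desc, u32 addr)
{
	put_unaligned_le32(addr, (u8 *)desc + 4);
}

Both calls perform only byte loads and stores on ARM, so they stay safe even
when pkt or desc is not naturally aligned.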