/contrib/media/updf/include/linux/byteorder/big_endian.h |
#ifndef _LINUX_BYTEORDER_BIG_ENDIAN_H |
#define _LINUX_BYTEORDER_BIG_ENDIAN_H |
#ifndef __BIG_ENDIAN |
#define __BIG_ENDIAN 4321 |
#endif |
#ifndef __BIG_ENDIAN_BITFIELD |
#define __BIG_ENDIAN_BITFIELD |
#endif |
#include <linux/byteorder/swab.h> |
#define __constant_htonl(x) ((__u32)(x)) |
#define __constant_ntohl(x) ((__u32)(x)) |
#define __constant_htons(x) ((__u16)(x)) |
#define __constant_ntohs(x) ((__u16)(x)) |
#define __constant_cpu_to_le64(x) ___constant_swab64((x)) |
#define __constant_le64_to_cpu(x) ___constant_swab64((x)) |
#define __constant_cpu_to_le32(x) ___constant_swab32((x)) |
#define __constant_le32_to_cpu(x) ___constant_swab32((x)) |
#define __constant_cpu_to_le16(x) ___constant_swab16((x)) |
#define __constant_le16_to_cpu(x) ___constant_swab16((x)) |
#define __constant_cpu_to_be64(x) ((__u64)(x)) |
#define __constant_be64_to_cpu(x) ((__u64)(x)) |
#define __constant_cpu_to_be32(x) ((__u32)(x)) |
#define __constant_be32_to_cpu(x) ((__u32)(x)) |
#define __constant_cpu_to_be16(x) ((__u16)(x)) |
#define __constant_be16_to_cpu(x) ((__u16)(x)) |
#define __cpu_to_le64(x) __swab64((x)) |
#define __le64_to_cpu(x) __swab64((x)) |
#define __cpu_to_le32(x) __swab32((x)) |
#define __le32_to_cpu(x) __swab32((x)) |
#define __cpu_to_le16(x) __swab16((x)) |
#define __le16_to_cpu(x) __swab16((x)) |
#define __cpu_to_be64(x) ((__u64)(x)) |
#define __be64_to_cpu(x) ((__u64)(x)) |
#define __cpu_to_be32(x) ((__u32)(x)) |
#define __be32_to_cpu(x) ((__u32)(x)) |
#define __cpu_to_be16(x) ((__u16)(x)) |
#define __be16_to_cpu(x) ((__u16)(x)) |
#define __cpu_to_le64p(x) __swab64p((x)) |
#define __le64_to_cpup(x) __swab64p((x)) |
#define __cpu_to_le32p(x) __swab32p((x)) |
#define __le32_to_cpup(x) __swab32p((x)) |
#define __cpu_to_le16p(x) __swab16p((x)) |
#define __le16_to_cpup(x) __swab16p((x)) |
#define __cpu_to_be64p(x) (*(__u64*)(x)) |
#define __be64_to_cpup(x) (*(__u64*)(x)) |
#define __cpu_to_be32p(x) (*(__u32*)(x)) |
#define __be32_to_cpup(x) (*(__u32*)(x)) |
#define __cpu_to_be16p(x) (*(__u16*)(x)) |
#define __be16_to_cpup(x) (*(__u16*)(x)) |
#define __cpu_to_le64s(x) __swab64s((x)) |
#define __le64_to_cpus(x) __swab64s((x)) |
#define __cpu_to_le32s(x) __swab32s((x)) |
#define __le32_to_cpus(x) __swab32s((x)) |
#define __cpu_to_le16s(x) __swab16s((x)) |
#define __le16_to_cpus(x) __swab16s((x)) |
#define __cpu_to_be64s(x) do {} while (0) |
#define __be64_to_cpus(x) do {} while (0) |
#define __cpu_to_be32s(x) do {} while (0) |
#define __be32_to_cpus(x) do {} while (0) |
#define __cpu_to_be16s(x) do {} while (0) |
#define __be16_to_cpus(x) do {} while (0) |
#include <linux/byteorder/generic.h> |
#endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */ |
/contrib/media/updf/include/linux/byteorder/generic.h |
#ifndef _LINUX_BYTEORDER_GENERIC_H |
#define _LINUX_BYTEORDER_GENERIC_H |
/* |
* linux/byteorder/generic.h
* Generic Byte-reordering support |
* |
* Francois-Rene Rideau <fare@tunes.org> 19970707 |
* gathered all the good ideas from all asm-foo/byteorder.h into one file, |
* cleaned them up. |
* I hope it is compliant with non-GCC compilers. |
* I decided to put __BYTEORDER_HAS_U64__ in byteorder.h, |
* because I wasn't sure it would be ok to put it in types.h |
* Upgraded it to 2.1.43 |
* Francois-Rene Rideau <fare@tunes.org> 19971012 |
* Upgraded it to 2.1.57 |
* to please Linus T., replaced huge #ifdef's between little/big endian |
* by nestedly #include'd files. |
* Francois-Rene Rideau <fare@tunes.org> 19971205 |
* Made it to 2.1.71; now a facelift: |
* Put files under include/linux/byteorder/ |
* Split swab from generic support. |
* |
* TODO: |
* = Regular kernel maintainers could also replace all these manual |
* byteswap macros that remain, disseminated among drivers, |
* after some grep of the sources...
* = Linus might want to rename all these macros and files to fit his taste, |
* to fit his personal naming scheme. |
* = it seems that a few drivers would also appreciate |
* nybble swapping support... |
* = every architecture could add their byteswap macro in asm/byteorder.h |
* see how some architectures already do (i386, alpha, ppc, etc) |
* = cpu_to_beXX and beXX_to_cpu might some day need to be well |
* distinguished throughout the kernel. This is not the case currently, |
* since little endian, big endian, and pdp endian machines don't need it.
* But this might be the case for, say, a port of Linux to 20/21 bit |
* architectures (any F21 Linux addict around?).
*/ |
/* |
* The following macros are to be defined by <asm/byteorder.h>: |
* |
* Conversion of long and short int between network and host format |
* ntohl(__u32 x) |
* ntohs(__u16 x) |
* htonl(__u32 x) |
* htons(__u16 x) |
* It seems that some programs (which? where? or perhaps a standard? POSIX?) |
* might like the above to be functions, not macros (why?). |
* If that's true, then detect them, and take measures.
* Anyway, the measure is: define only ___ntohl as a macro instead, |
* and in a separate file, have |
* unsigned long inline ntohl(x){return ___ntohl(x);} |
* |
* The same for constant arguments |
* __constant_ntohl(__u32 x) |
* __constant_ntohs(__u16 x) |
* __constant_htonl(__u32 x) |
* __constant_htons(__u16 x) |
* |
* Conversion of XX-bit integers (16- 32- or 64-) |
* between native CPU format and little/big endian format |
* 64-bit stuff only defined for proper architectures |
* cpu_to_[bl]eXX(__uXX x) |
* [bl]eXX_to_cpu(__uXX x) |
* |
* The same, but takes a pointer to the value to convert |
* cpu_to_[bl]eXXp(__uXX x) |
* [bl]eXX_to_cpup(__uXX x) |
* |
* The same, but change in situ |
* cpu_to_[bl]eXXs(__uXX x) |
* [bl]eXX_to_cpus(__uXX x) |
* |
* See asm-foo/byteorder.h for examples of how to provide |
* architecture-optimized versions |
* |
*/ |
#if defined(__KERNEL__) |
/* |
* inside the kernel, we can use nicknames; |
* outside of it, we must avoid POSIX namespace pollution... |
*/ |
#define cpu_to_le64 __cpu_to_le64 |
#define le64_to_cpu __le64_to_cpu |
#define cpu_to_le32 __cpu_to_le32 |
#define le32_to_cpu __le32_to_cpu |
#define cpu_to_le16 __cpu_to_le16 |
#define le16_to_cpu __le16_to_cpu |
#define cpu_to_be64 __cpu_to_be64 |
#define be64_to_cpu __be64_to_cpu |
#define cpu_to_be32 __cpu_to_be32 |
#define be32_to_cpu __be32_to_cpu |
#define cpu_to_be16 __cpu_to_be16 |
#define be16_to_cpu __be16_to_cpu |
#define cpu_to_le64p __cpu_to_le64p |
#define le64_to_cpup __le64_to_cpup |
#define cpu_to_le32p __cpu_to_le32p |
#define le32_to_cpup __le32_to_cpup |
#define cpu_to_le16p __cpu_to_le16p |
#define le16_to_cpup __le16_to_cpup |
#define cpu_to_be64p __cpu_to_be64p |
#define be64_to_cpup __be64_to_cpup |
#define cpu_to_be32p __cpu_to_be32p |
#define be32_to_cpup __be32_to_cpup |
#define cpu_to_be16p __cpu_to_be16p |
#define be16_to_cpup __be16_to_cpup |
#define cpu_to_le64s __cpu_to_le64s |
#define le64_to_cpus __le64_to_cpus |
#define cpu_to_le32s __cpu_to_le32s |
#define le32_to_cpus __le32_to_cpus |
#define cpu_to_le16s __cpu_to_le16s |
#define le16_to_cpus __le16_to_cpus |
#define cpu_to_be64s __cpu_to_be64s |
#define be64_to_cpus __be64_to_cpus |
#define cpu_to_be32s __cpu_to_be32s |
#define be32_to_cpus __be32_to_cpus |
#define cpu_to_be16s __cpu_to_be16s |
#define be16_to_cpus __be16_to_cpus |
#endif |
/* |
* Handle ntohl and friends. These have various compatibility
* issues - like we want to give the prototype even though we |
* also have a macro for them in case some strange program |
* wants to take the address of the thing or something.. |
* |
* Note that these used to return a "long" in libc5, even though |
* long is often 64-bit these days.. Thus the casts. |
* |
* They have to be macros in order to do the constant folding |
* correctly - if the argument is passed into an inline function
* it is no longer constant according to gcc.
*/ |
#undef ntohl |
#undef ntohs |
#undef htonl |
#undef htons |
/* |
* Do the prototypes. Somebody might want to take the |
* address or some such sick thing.. |
*/ |
#if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2) |
extern __u32 ntohl(__u32); |
extern __u32 htonl(__u32); |
#else |
extern unsigned long int ntohl(unsigned long int); |
extern unsigned long int htonl(unsigned long int); |
#endif |
extern unsigned short int ntohs(unsigned short int); |
extern unsigned short int htons(unsigned short int); |
#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__) |
#define ___htonl(x) __cpu_to_be32(x) |
#define ___htons(x) __cpu_to_be16(x) |
#define ___ntohl(x) __be32_to_cpu(x) |
#define ___ntohs(x) __be16_to_cpu(x) |
#if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2) |
#define htonl(x) ___htonl(x) |
#define ntohl(x) ___ntohl(x) |
#else |
#define htonl(x) ((unsigned long)___htonl(x)) |
#define ntohl(x) ((unsigned long)___ntohl(x)) |
#endif |
#define htons(x) ___htons(x) |
#define ntohs(x) ___ntohs(x) |
#endif /* OPTIMIZE */ |
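/*
* Example: with gcc and -O (so the macro forms above are in effect), a
* constant argument is folded at compile time:
*
*     __u16 port = htons(80);        no run-time byteswap is emitted
*
* Where a true integer constant expression is required (case labels,
* file-scope initializers), the __constant_* forms defined by the endian
* headers can be used instead, e.g.
*
*     switch (proto) {               "proto" is an invented name
*     case __constant_htons(0x0800):
*             ...
*     }
*
* On a little-endian machine __constant_htons(0x0800) folds to 0x0008.
*/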
#endif /* _LINUX_BYTEORDER_GENERIC_H */ |
/contrib/media/updf/include/linux/byteorder/little_endian.h |
#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H |
#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H |
#ifndef __LITTLE_ENDIAN |
#define __LITTLE_ENDIAN 1234 |
#endif |
#ifndef __LITTLE_ENDIAN_BITFIELD |
#define __LITTLE_ENDIAN_BITFIELD |
#endif |
#include <linux/byteorder/swab.h> |
#define __constant_htonl(x) ___constant_swab32((x)) |
#define __constant_ntohl(x) ___constant_swab32((x)) |
#define __constant_htons(x) ___constant_swab16((x)) |
#define __constant_ntohs(x) ___constant_swab16((x)) |
#define __constant_cpu_to_le64(x) ((__u64)(x)) |
#define __constant_le64_to_cpu(x) ((__u64)(x)) |
#define __constant_cpu_to_le32(x) ((__u32)(x)) |
#define __constant_le32_to_cpu(x) ((__u32)(x)) |
#define __constant_cpu_to_le16(x) ((__u16)(x)) |
#define __constant_le16_to_cpu(x) ((__u16)(x)) |
#define __constant_cpu_to_be64(x) ___constant_swab64((x)) |
#define __constant_be64_to_cpu(x) ___constant_swab64((x)) |
#define __constant_cpu_to_be32(x) ___constant_swab32((x)) |
#define __constant_be32_to_cpu(x) ___constant_swab32((x)) |
#define __constant_cpu_to_be16(x) ___constant_swab16((x)) |
#define __constant_be16_to_cpu(x) ___constant_swab16((x)) |
#define __cpu_to_le64(x) ((__u64)(x)) |
#define __le64_to_cpu(x) ((__u64)(x)) |
#define __cpu_to_le32(x) ((__u32)(x)) |
#define __le32_to_cpu(x) ((__u32)(x)) |
#define __cpu_to_le16(x) ((__u16)(x)) |
#define __le16_to_cpu(x) ((__u16)(x)) |
#define __cpu_to_be64(x) __swab64((x)) |
#define __be64_to_cpu(x) __swab64((x)) |
#define __cpu_to_be32(x) __swab32((x)) |
#define __be32_to_cpu(x) __swab32((x)) |
#define __cpu_to_be16(x) __swab16((x)) |
#define __be16_to_cpu(x) __swab16((x)) |
#define __cpu_to_le64p(x) (*(__u64*)(x)) |
#define __le64_to_cpup(x) (*(__u64*)(x)) |
#define __cpu_to_le32p(x) (*(__u32*)(x)) |
#define __le32_to_cpup(x) (*(__u32*)(x)) |
#define __cpu_to_le16p(x) (*(__u16*)(x)) |
#define __le16_to_cpup(x) (*(__u16*)(x)) |
#define __cpu_to_be64p(x) __swab64p((x)) |
#define __be64_to_cpup(x) __swab64p((x)) |
#define __cpu_to_be32p(x) __swab32p((x)) |
#define __be32_to_cpup(x) __swab32p((x)) |
#define __cpu_to_be16p(x) __swab16p((x)) |
#define __be16_to_cpup(x) __swab16p((x)) |
#define __cpu_to_le64s(x) do {} while (0) |
#define __le64_to_cpus(x) do {} while (0) |
#define __cpu_to_le32s(x) do {} while (0) |
#define __le32_to_cpus(x) do {} while (0) |
#define __cpu_to_le16s(x) do {} while (0) |
#define __le16_to_cpus(x) do {} while (0) |
#define __cpu_to_be64s(x) __swab64s((x)) |
#define __be64_to_cpus(x) __swab64s((x)) |
#define __cpu_to_be32s(x) __swab32s((x)) |
#define __be32_to_cpus(x) __swab32s((x)) |
#define __cpu_to_be16s(x) __swab16s((x)) |
#define __be16_to_cpus(x) __swab16s((x)) |
#include <linux/byteorder/generic.h> |
#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */ |
/contrib/media/updf/include/linux/byteorder/pdp_endian.h |
#ifndef _LINUX_BYTEORDER_PDP_ENDIAN_H |
#define _LINUX_BYTEORDER_PDP_ENDIAN_H |
/* |
* Could have been named NUXI-endian, but we use the same name as in glibc. |
* Hopefully only the PDP and its evolutions (old VAXen in compatibility mode)
* should ever use this braindead byteorder. |
* This file *should* work, but has not been tested. |
* |
* little-endian is 1234; big-endian is 4321; nuxi/pdp-endian is 3412 |
* |
* I thought vaxen were NUXI-endian, but was told they were correct-endian |
* (little-endian), though indeed there existed NUXI-endian machines |
* (DEC PDP-11 and old VAXen in compatibility mode). |
* This makes this file a bit useless, but it remains as a proof-of-concept.
* |
* But what does a __u64 look like: is it 34127856 or 78563412 ??? |
* I don't dare imagine! Hence, no 64-bit byteorder support yet. |
* Hopefully, 64-bit pdp-endian support shouldn't ever be required.
* |
*/ |
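/*
* Worked example: the 32-bit value 0x0A0B0C0D is laid out in memory,
* lowest address first, as
*
*     big-endian    (4321):  0A 0B 0C 0D
*     little-endian (1234):  0D 0C 0B 0A
*     pdp/nuxi      (3412):  0B 0A 0D 0C
*
* i.e. pdp-endian keeps the two 16-bit halfwords in big-endian order while
* each halfword is itself stored little-endian.  That is why, below, the
* le32 conversions are a halfword swap (swahw32) and the be32 conversions
* swap the bytes within each halfword (swahb32).
*/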
#ifndef __PDP_ENDIAN |
#define __PDP_ENDIAN 3412 |
#endif |
#ifndef __PDP_ENDIAN_BITFIELD |
#define __PDP_ENDIAN_BITFIELD |
#endif |
#include <linux/byteorder/swab.h> |
#include <linux/byteorder/swabb.h> |
#define __constant_htonl(x) ___constant_swahb32((x)) |
#define __constant_ntohl(x) ___constant_swahb32((x)) |
#define __constant_htons(x) ___constant_swab16((x)) |
#define __constant_ntohs(x) ___constant_swab16((x)) |
#define __constant_cpu_to_le64(x) I DON'T KNOW |
#define __constant_le64_to_cpu(x) I DON'T KNOW |
#define __constant_cpu_to_le32(x) ___constant_swahw32((x)) |
#define __constant_le32_to_cpu(x) ___constant_swahw32((x)) |
#define __constant_cpu_to_le16(x) ((__u16)(x))
#define __constant_le16_to_cpu(x) ((__u16)(x))
#define __constant_cpu_to_be64(x) I DON'T KNOW |
#define __constant_be64_to_cpu(x) I DON'T KNOW |
#define __constant_cpu_to_be32(x) ___constant_swahb32((x)) |
#define __constant_be32_to_cpu(x) ___constant_swahb32((x)) |
#define __constant_cpu_to_be16(x) ___constant_swab16((x)) |
#define __constant_be16_to_cpu(x) ___constant_swab16((x)) |
#define __cpu_to_le64(x) I DON'T KNOW |
#define __le64_to_cpu(x) I DON'T KNOW |
#define __cpu_to_le32(x) ___swahw32((x)) |
#define __le32_to_cpu(x) ___swahw32((x)) |
#define __cpu_to_le16(x) ((__u16)(x))
#define __le16_to_cpu(x) ((__u16)(x))
#define __cpu_to_be64(x) I DON'T KNOW |
#define __be64_to_cpu(x) I DON'T KNOW |
#define __cpu_to_be32(x) __swahb32((x)) |
#define __be32_to_cpu(x) __swahb32((x)) |
#define __cpu_to_be16(x) __swab16((x)) |
#define __be16_to_cpu(x) __swab16((x)) |
#define __cpu_to_le64p(x) I DON'T KNOW |
#define __le64_to_cpup(x) I DON'T KNOW |
#define __cpu_to_le32p(x) ___swahw32p((x)) |
#define __le32_to_cpup(x) ___swahw32p((x)) |
#define __cpu_to_le16p(x) (*(__u16*)(x)) |
#define __le16_to_cpup(x) (*(__u16*)(x)) |
#define __cpu_to_be64p(x) I DON'T KNOW |
#define __be64_to_cpup(x) I DON'T KNOW |
#define __cpu_to_be32p(x) __swahb32p((x)) |
#define __be32_to_cpup(x) __swahb32p((x)) |
#define __cpu_to_be16p(x) __swab16p((x)) |
#define __be16_to_cpup(x) __swab16p((x)) |
#define __cpu_to_le64s(x) I DON'T KNOW |
#define __le64_to_cpus(x) I DON'T KNOW |
#define __cpu_to_le32s(x) ___swahw32s((x)) |
#define __le32_to_cpus(x) ___swahw32s((x)) |
#define __cpu_to_le16s(x) do {} while (0) |
#define __le16_to_cpus(x) do {} while (0) |
#define __cpu_to_be64s(x) I DON'T KNOW |
#define __be64_to_cpus(x) I DON'T KNOW |
#define __cpu_to_be32s(x) __swahb32s((x)) |
#define __be32_to_cpus(x) __swahb32s((x)) |
#define __cpu_to_be16s(x) __swab16s((x)) |
#define __be16_to_cpus(x) __swab16s((x)) |
#include <linux/byteorder/generic.h> |
#endif /* _LINUX_BYTEORDER_PDP_ENDIAN_H */ |
/contrib/media/updf/include/linux/byteorder/swab.h |
#ifndef _LINUX_BYTEORDER_SWAB_H |
#define _LINUX_BYTEORDER_SWAB_H |
/* |
* linux/byteorder/swab.h |
* Byte-swapping, independently from CPU endianness |
* swabXX[ps]?(foo) |
* |
* Francois-Rene Rideau <fare@tunes.org> 19971205 |
* separated swab functions from cpu_to_XX, |
* to clean up support for bizarre-endian architectures. |
* |
* See asm-i386/byteorder.h and similar headers for examples of how to provide
* architecture-dependent optimized versions |
* |
*/ |
/* casts are necessary for constants, because we never know for sure
* how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
*/ |
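/*
* Worked examples of what the macros below compute:
*
*     ___swab16(0x1234)                == 0x3412
*     ___swab32(0x12345678)            == 0x78563412
*     ___swab64(0x0123456789abcdefULL) == 0xefcdab8967452301ULL
*/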
#define ___swab16(x) \ |
({ \ |
__u16 __x = (x); \ |
((__u16)( \ |
(((__u16)(__x) & (__u16)0x00ffU) << 8) | \ |
(((__u16)(__x) & (__u16)0xff00U) >> 8) )); \ |
}) |
#define ___swab32(x) \ |
({ \ |
__u32 __x = (x); \ |
((__u32)( \ |
(((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \ |
(((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \ |
(((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \ |
(((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \ |
}) |
#define ___swab64(x) \ |
({ \ |
__u64 __x = (x); \ |
((__u64)( \ |
(__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \ |
(__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \ |
(__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \ |
(__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \ |
(__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \ |
(__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ |
(__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \ |
(__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \ |
}) |
#define ___constant_swab16(x) \ |
((__u16)( \ |
(((__u16)(x) & (__u16)0x00ffU) << 8) | \ |
(((__u16)(x) & (__u16)0xff00U) >> 8) )) |
#define ___constant_swab32(x) \ |
((__u32)( \ |
(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ |
(((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ |
(((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ |
(((__u32)(x) & (__u32)0xff000000UL) >> 24) )) |
#define ___constant_swab64(x) \ |
((__u64)( \ |
(__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ |
(__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ |
(__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ |
(__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ |
(__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ |
(__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ |
(__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ |
(__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) )) |
/* |
* provide defaults when no architecture-specific optimization is detected |
*/ |
#ifndef __arch__swab16 |
# define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); }) |
#endif |
#ifndef __arch__swab32 |
# define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); }) |
#endif |
#ifndef __arch__swab64 |
# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); }) |
#endif |
#ifndef __arch__swab16p |
# define __arch__swab16p(x) __arch__swab16(*(x)) |
#endif |
#ifndef __arch__swab32p |
# define __arch__swab32p(x) __arch__swab32(*(x)) |
#endif |
#ifndef __arch__swab64p |
# define __arch__swab64p(x) __arch__swab64(*(x)) |
#endif |
#ifndef __arch__swab16s |
# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0) |
#endif |
#ifndef __arch__swab32s |
# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0) |
#endif |
#ifndef __arch__swab64s |
# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0) |
#endif |
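/*
* Hypothetical sketch (not the actual asm-i386 code): an <asm/byteorder.h>
* processed before this point may predefine __arch__swab32 and friends, in
* which case the generic defaults above are skipped.  An x86-style header
* could do roughly:
*
*     static __inline__ __u32 my_arch_swab32(__u32 x)
*     {
*             __asm__("bswap %0" : "=r" (x) : "0" (x));
*             return x;
*     }
*     #define __arch__swab32(x) my_arch_swab32(x)
*
* my_arch_swab32 is an invented name; the real asm-i386 header differs in
* detail, this only illustrates the override mechanism.
*/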
/* |
* Allow constant folding |
*/ |
#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__) |
# define __swab16(x) \ |
(__builtin_constant_p((__u16)(x)) ? \ |
___swab16((x)) : \ |
__fswab16((x))) |
# define __swab32(x) \ |
(__builtin_constant_p((__u32)(x)) ? \ |
___swab32((x)) : \ |
__fswab32((x))) |
# define __swab64(x) \ |
(__builtin_constant_p((__u64)(x)) ? \ |
___swab64((x)) : \ |
__fswab64((x))) |
#else |
# define __swab16(x) __fswab16(x) |
# define __swab32(x) __fswab32(x) |
# define __swab64(x) __fswab64(x) |
#endif /* OPTIMIZE */ |
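/*
* Example: with gcc and -O, __swab32(0x12345678) is folded at compile time
* through ___swab32, while __swab32(some_variable) goes through __fswab32()
* below, which in turn uses the (possibly arch-optimized) __arch__swab32().
*/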
static __inline__ __const__ __u16 __fswab16(__u16 x) |
{ |
return __arch__swab16(x); |
} |
static __inline__ __u16 __swab16p(__u16 *x) |
{ |
return __arch__swab16p(x); |
} |
static __inline__ void __swab16s(__u16 *addr) |
{ |
__arch__swab16s(addr); |
} |
static __inline__ __const__ __u32 __fswab32(__u32 x) |
{ |
return __arch__swab32(x); |
} |
static __inline__ __u32 __swab32p(__u32 *x) |
{ |
return __arch__swab32p(x); |
} |
static __inline__ void __swab32s(__u32 *addr) |
{ |
__arch__swab32s(addr); |
} |
#ifdef __BYTEORDER_HAS_U64__ |
static __inline__ const __u64 __fswab64(__u64 x) |
{ |
# ifdef __SWAB_64_THRU_32__ |
__u32 h = x >> 32; |
__u32 l = x & ((1ULL<<32)-1); |
return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h))); |
# else |
return __arch__swab64(x); |
# endif |
} |
static __inline__ __u64 __swab64p(__u64 *x) |
{ |
return __arch__swab64p(x); |
} |
static __inline__ void __swab64s(__u64 *addr) |
{ |
__arch__swab64s(addr); |
} |
#endif /* __BYTEORDER_HAS_U64__ */ |
#if defined(__KERNEL__) |
#define swab16 __swab16 |
#define swab32 __swab32 |
#define swab64 __swab64 |
#define swab16p __swab16p |
#define swab32p __swab32p |
#define swab64p __swab64p |
#define swab16s __swab16s |
#define swab32s __swab32s |
#define swab64s __swab64s |
#endif |
#endif /* _LINUX_BYTEORDER_SWAB_H */ |
/contrib/media/updf/include/linux/byteorder/swabb.h |
#ifndef _LINUX_BYTEORDER_SWABB_H |
#define _LINUX_BYTEORDER_SWABB_H |
/* |
* linux/byteorder/swabb.h |
* SWAp Bytes Bizarrely |
* swaHHXX[ps]?(foo) |
* |
* Support for obNUXIous pdp-endian and other bizarre architectures. |
* Will Linux ever run on such ancient beasts? If not, this file
* will be but a programming pearl. Still, it's a reminder that we |
* shouldn't be making too many assumptions when trying to be portable. |
* |
*/ |
/* |
* Meaning of the names I chose (vaxlinux people feel free to correct them): |
* swahw32 swap 16-bit half-words in a 32-bit word |
* swahb32 swap 8-bit halves of each 16-bit half-word in a 32-bit word |
* |
* No 64-bit support yet. I don't know NUXI conventions for long longs. |
* I guarantee it will be a mess when it's there, though :-> |
* It will be even worse if there are conflicting 64-bit conventions. |
* Hopefully, no one ever used 64-bit objects on NUXI machines. |
* |
*/ |
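/*
* Worked examples:
*
*     ___swahw32(0x12345678) == 0x56781234   (16-bit halfwords exchanged)
*     ___swahb32(0x12345678) == 0x34127856   (bytes swapped within each
*                                             16-bit halfword)
*/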
#define ___swahw32(x) \ |
({ \ |
__u32 __x = (x); \ |
((__u32)( \ |
(((__u32)(__x) & (__u32)0x0000ffffUL) << 16) | \ |
(((__u32)(__x) & (__u32)0xffff0000UL) >> 16) )); \ |
}) |
#define ___swahb32(x) \ |
({ \ |
__u32 __x = (x); \ |
((__u32)( \ |
(((__u32)(__x) & (__u32)0x00ff00ffUL) << 8) | \ |
(((__u32)(__x) & (__u32)0xff00ff00UL) >> 8) )); \ |
}) |
#define ___constant_swahw32(x) \ |
((__u32)( \ |
(((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \ |
(((__u32)(x) & (__u32)0xffff0000UL) >> 16) )) |
#define ___constant_swahb32(x) \ |
((__u32)( \ |
(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \ |
(((__u32)(x) & (__u32)0xff00ff00UL) >> 8) )) |
/* |
* provide defaults when no architecture-specific optimization is detected |
*/ |
#ifndef __arch__swahw32 |
# define __arch__swahw32(x) ___swahw32(x) |
#endif |
#ifndef __arch__swahb32 |
# define __arch__swahb32(x) ___swahb32(x) |
#endif |
#ifndef __arch__swahw32p |
# define __arch__swahw32p(x) __swahw32(*(x)) |
#endif |
#ifndef __arch__swahb32p |
# define __arch__swahb32p(x) __swahb32(*(x)) |
#endif |
#ifndef __arch__swahw32s |
# define __arch__swahw32s(x) do { *(x) = __swahw32p((x)); } while (0) |
#endif |
#ifndef __arch__swahb32s |
# define __arch__swahb32s(x) do { *(x) = __swahb32p((x)); } while (0) |
#endif |
/* |
* Allow constant folding |
*/ |
#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__) |
# define __swahw32(x) \ |
(__builtin_constant_p((__u32)(x)) ? \ |
___swahw32((x)) : \ |
__fswahw32((x))) |
# define __swahb32(x) \ |
(__builtin_constant_p((__u32)(x)) ? \ |
___swahb32((x)) : \ |
__fswahb32((x))) |
#else |
# define __swahw32(x) __fswahw32(x) |
# define __swahb32(x) __fswahb32(x) |
#endif /* OPTIMIZE */ |
static __inline__ __const__ __u32 __fswahw32(__u32 x) |
{ |
return __arch__swahw32(x); |
} |
static __inline__ __u32 __swahw32p(__u32 *x) |
{ |
return __arch__swahw32p(x); |
} |
static __inline__ void __swahw32s(__u32 *addr) |
{ |
__arch__swahw32s(addr); |
} |
static __inline__ __const__ __u32 __fswahb32(__u32 x) |
{ |
return __arch__swahb32(x); |
} |
static __inline__ __u32 __swahb32p(__u32 *x) |
{ |
return __arch__swahb32p(x); |
} |
static __inline__ void __swahb32s(__u32 *addr) |
{ |
__arch__swahb32s(addr); |
} |
#ifdef __BYTEORDER_HAS_U64__ |
/* |
* Not supported yet |
*/ |
#endif /* __BYTEORDER_HAS_U64__ */ |
#if defined(__KERNEL__) |
#define swahw32 __swahw32 |
#define swahb32 __swahb32 |
#define swahw32p __swahw32p |
#define swahb32p __swahb32p |
#define swahw32s __swahw32s |
#define swahb32s __swahb32s |
#endif |
#endif /* _LINUX_BYTEORDER_SWABB_H */ |