4065 | Serge | 1 | #ifndef _LINUX_HASH_H |
2 | #define _LINUX_HASH_H |
||
3 | /* Fast hashing routine for ints, longs and pointers. |
||
4 | (C) 2002 Nadia Yvette Chambers, IBM */ |
||
5 | |||
6 | /* |
||
7 | * Knuth recommends primes in approximately golden ratio to the maximum |
||
8 | * integer representable by a machine word for multiplicative hashing. |
||
9 | * Chuck Lever verified the effectiveness of this technique: |
||
10 | * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf |
||
11 | * |
||
12 | * These primes are chosen to be bit-sparse, that is operations on |
||
13 | * them can use shifts and additions instead of multiplications for |
||
14 | * machines where multiplications are slow. |
||
15 | */ |
||
16 | |||
17 | #include |
||
18 | #include |
||
19 | |||
20 | /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ |
||
21 | #define GOLDEN_RATIO_PRIME_32 0x9e370001UL |
||
22 | /* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */ |
||
23 | #define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL |
||
24 | |||
25 | #if BITS_PER_LONG == 32 |
||
26 | #define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32 |
||
27 | #define hash_long(val, bits) hash_32(val, bits) |
||
28 | #elif BITS_PER_LONG == 64 |
||
29 | #define hash_long(val, bits) hash_64(val, bits) |
||
30 | #define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64 |
||
31 | #else |
||
32 | #error Wordsize not 32 or 64 |
||
33 | #endif |
||
34 | |||
35 | static __always_inline u64 hash_64(u64 val, unsigned int bits) |
||
36 | { |
||
37 | u64 hash = val; |
||
38 | |||
5270 | serge | 39 | #if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 |
40 | hash = hash * GOLDEN_RATIO_PRIME_64; |
||
41 | #else |
||
4065 | Serge | 42 | /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ |
43 | u64 n = hash; |
||
44 | n <<= 18; |
||
45 | hash -= n; |
||
46 | n <<= 33; |
||
47 | hash -= n; |
||
48 | n <<= 3; |
||
49 | hash += n; |
||
50 | n <<= 3; |
||
51 | hash -= n; |
||
52 | n <<= 4; |
||
53 | hash += n; |
||
54 | n <<= 2; |
||
55 | hash += n; |
||
5270 | serge | 56 | #endif |
4065 | Serge | 57 | |
58 | /* High bits are more random, so use them. */ |
||
59 | return hash >> (64 - bits); |
||
60 | } |
||
61 | |||
62 | static inline u32 hash_32(u32 val, unsigned int bits) |
||
63 | { |
||
64 | /* On some cpus multiply is faster, on others gcc will do shifts */ |
||
65 | u32 hash = val * GOLDEN_RATIO_PRIME_32; |
||
66 | |||
67 | /* High bits are more random, so use them. */ |
||
68 | return hash >> (32 - bits); |
||
69 | } |
||
70 | |||
/**
 * hash_ptr - hash a pointer down to @bits bits
 * @ptr:  pointer whose bit pattern is hashed
 * @bits: number of result bits wanted
 *
 * Treats the pointer as a word-sized integer and feeds it to hash_long(),
 * which dispatches to hash_32() or hash_64() per BITS_PER_LONG.
 */
static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
{
	unsigned long word = (unsigned long)ptr;

	return hash_long(word, bits);
}
||
75 | |||
76 | static inline u32 hash32_ptr(const void *ptr) |
||
77 | { |
||
78 | unsigned long val = (unsigned long)ptr; |
||
79 | |||
80 | #if BITS_PER_LONG == 64 |
||
81 | val ^= (val >> 32); |
||
82 | #endif |
||
83 | return (u32)val; |
||
84 | } |
||

#endif /* _LINUX_HASH_H */