00001
00002
00003
00004
00005
00006
00007
00008
00009
00010 #include "MurmurHash3.h"
00011
00012
00013
00014
00015
00016
//-----------------------------------------------------------------------------
// Platform-specific functions and macros.
//
// MSVC provides rotate intrinsics and accepts 64-bit literals without a
// suffix; other compilers get portable rotate helpers (the optimizer is
// expected to reduce them to a single rotate instruction).

#if defined(_MSC_VER)

#define FORCE_INLINE __forceinline

#include <cstdlib>

// Rotate-left via compiler intrinsics.
#define ROTL32(x,y) _rotl(x,y)
#define ROTL64(x,y) _rotl64(x,y)

// MSVC needs no suffix on 64-bit constants.
#define BIG_CONSTANT(x) (x)



#else // defined(_MSC_VER)

#define FORCE_INLINE inline __attribute__((always_inline))

// Portable rotate-left.
// NOTE(review): r == 0 would make (32 - r) a full-width shift, which is
// undefined behavior; every call site in this file uses a fixed rotation
// in [13,33], so that case is never reached here.
inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}

inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y) rotl32(x,y)
#define ROTL64(x,y) rotl64(x,y)

// Append the LLU suffix required for 64-bit literals.
#define BIG_CONSTANT(x) (x##LLU)

#endif // !defined(_MSC_VER)
00050
00051
00052
00053
00054
00055 FORCE_INLINE uint32_t getblock ( const uint32_t * p, int i )
00056 {
00057 return p[i];
00058 }
00059
00060 FORCE_INLINE uint64_t getblock ( const uint64_t * p, int i )
00061 {
00062 return p[i];
00063 }
00064
00065
00066
00067
00068 FORCE_INLINE uint32_t fmix ( uint32_t h )
00069 {
00070 h ^= h >> 16;
00071 h *= 0x85ebca6b;
00072 h ^= h >> 13;
00073 h *= 0xc2b2ae35;
00074 h ^= h >> 16;
00075
00076 return h;
00077 }
00078
00079
00080
00081 FORCE_INLINE uint64_t fmix ( uint64_t k )
00082 {
00083 k ^= k >> 33;
00084 k *= BIG_CONSTANT(0xff51afd7ed558ccd);
00085 k ^= k >> 33;
00086 k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
00087 k ^= k >> 33;
00088
00089 return k;
00090 }
00091
00092
00093
//-----------------------------------------------------------------------------
// MurmurHash3, 32-bit x86 variant.
//
//   key  - data to hash, treated as a byte stream; 4-byte blocks are read
//          through getblock (the hook point for alignment/endian fixes)
//   len  - length of key in bytes
//   seed - initial hash state; different seeds yield independent hashes
//   out  - receives the 32-bit result (must point to 4 writable bytes)
void MurmurHash3_x86_32 ( const void * key, int len,
                          uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;

  uint32_t h1 = seed;

  // Per-block mixing constants.
  uint32_t c1 = 0xcc9e2d51;
  uint32_t c2 = 0x1b873593;

  //----------
  // body

  // Point past the last full block and walk it with negative indices, so
  // the loop counter hits zero exactly when the blocks are exhausted.
  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);

  for(int i = -nblocks; i; i++) {
    uint32_t k1 = getblock(blocks,i);

    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;

    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  //----------
  // tail

  // Mix in the trailing 1-3 bytes; the switch intentionally falls through
  // so higher bytes accumulate into k1 before the final mix.
  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);

  uint32_t k1 = 0;

  switch(len & 3) {
  case 3:
    k1 ^= tail[2] << 16;
    /* fallthrough */
  case 2:
    k1 ^= tail[1] << 8;
    /* fallthrough */
  case 1:
    k1 ^= tail[0];
    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;
    h1 ^= k1;
  };

  //----------
  // finalization

  // Fold in the length so inputs of different lengths diverge.
  h1 ^= len;

  h1 = fmix(h1);

  *(uint32_t*)out = h1;
}
00151
00152
00153
00154 void MurmurHash3_x86_128 ( const void * key, const int len,
00155 uint32_t seed, void * out )
00156 {
00157 const uint8_t * data = (const uint8_t*)key;
00158 const int nblocks = len / 16;
00159
00160 uint32_t h1 = seed;
00161 uint32_t h2 = seed;
00162 uint32_t h3 = seed;
00163 uint32_t h4 = seed;
00164
00165 uint32_t c1 = 0x239b961b;
00166 uint32_t c2 = 0xab0e9789;
00167 uint32_t c3 = 0x38b34ae5;
00168 uint32_t c4 = 0xa1e38b93;
00169
00170
00171
00172
00173 const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);
00174
00175 for(int i = -nblocks; i; i++) {
00176 uint32_t k1 = getblock(blocks,i*4+0);
00177 uint32_t k2 = getblock(blocks,i*4+1);
00178 uint32_t k3 = getblock(blocks,i*4+2);
00179 uint32_t k4 = getblock(blocks,i*4+3);
00180
00181 k1 *= c1;
00182 k1 = ROTL32(k1,15);
00183 k1 *= c2;
00184 h1 ^= k1;
00185
00186 h1 = ROTL32(h1,19);
00187 h1 += h2;
00188 h1 = h1*5+0x561ccd1b;
00189
00190 k2 *= c2;
00191 k2 = ROTL32(k2,16);
00192 k2 *= c3;
00193 h2 ^= k2;
00194
00195 h2 = ROTL32(h2,17);
00196 h2 += h3;
00197 h2 = h2*5+0x0bcaa747;
00198
00199 k3 *= c3;
00200 k3 = ROTL32(k3,17);
00201 k3 *= c4;
00202 h3 ^= k3;
00203
00204 h3 = ROTL32(h3,15);
00205 h3 += h4;
00206 h3 = h3*5+0x96cd1c35;
00207
00208 k4 *= c4;
00209 k4 = ROTL32(k4,18);
00210 k4 *= c1;
00211 h4 ^= k4;
00212
00213 h4 = ROTL32(h4,13);
00214 h4 += h1;
00215 h4 = h4*5+0x32ac3b17;
00216 }
00217
00218
00219
00220
00221 const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
00222
00223 uint32_t k1 = 0;
00224 uint32_t k2 = 0;
00225 uint32_t k3 = 0;
00226 uint32_t k4 = 0;
00227
00228 switch(len & 15) {
00229 case 15:
00230 k4 ^= tail[14] << 16;
00231 case 14:
00232 k4 ^= tail[13] << 8;
00233 case 13:
00234 k4 ^= tail[12] << 0;
00235 k4 *= c4;
00236 k4 = ROTL32(k4,18);
00237 k4 *= c1;
00238 h4 ^= k4;
00239
00240 case 12:
00241 k3 ^= tail[11] << 24;
00242 case 11:
00243 k3 ^= tail[10] << 16;
00244 case 10:
00245 k3 ^= tail[ 9] << 8;
00246 case 9:
00247 k3 ^= tail[ 8] << 0;
00248 k3 *= c3;
00249 k3 = ROTL32(k3,17);
00250 k3 *= c4;
00251 h3 ^= k3;
00252
00253 case 8:
00254 k2 ^= tail[ 7] << 24;
00255 case 7:
00256 k2 ^= tail[ 6] << 16;
00257 case 6:
00258 k2 ^= tail[ 5] << 8;
00259 case 5:
00260 k2 ^= tail[ 4] << 0;
00261 k2 *= c2;
00262 k2 = ROTL32(k2,16);
00263 k2 *= c3;
00264 h2 ^= k2;
00265
00266 case 4:
00267 k1 ^= tail[ 3] << 24;
00268 case 3:
00269 k1 ^= tail[ 2] << 16;
00270 case 2:
00271 k1 ^= tail[ 1] << 8;
00272 case 1:
00273 k1 ^= tail[ 0] << 0;
00274 k1 *= c1;
00275 k1 = ROTL32(k1,15);
00276 k1 *= c2;
00277 h1 ^= k1;
00278 };
00279
00280
00281
00282
00283 h1 ^= len;
00284 h2 ^= len;
00285 h3 ^= len;
00286 h4 ^= len;
00287
00288 h1 += h2;
00289 h1 += h3;
00290 h1 += h4;
00291 h2 += h1;
00292 h3 += h1;
00293 h4 += h1;
00294
00295 h1 = fmix(h1);
00296 h2 = fmix(h2);
00297 h3 = fmix(h3);
00298 h4 = fmix(h4);
00299
00300 h1 += h2;
00301 h1 += h3;
00302 h1 += h4;
00303 h2 += h1;
00304 h3 += h1;
00305 h4 += h1;
00306
00307 ((uint32_t*)out)[0] = h1;
00308 ((uint32_t*)out)[1] = h2;
00309 ((uint32_t*)out)[2] = h3;
00310 ((uint32_t*)out)[3] = h4;
00311 }
00312
00313
00314
//-----------------------------------------------------------------------------
// MurmurHash3, 128-bit x64 variant (two interleaved 64-bit states).
//
//   key  - data to hash; 16-byte blocks are read through getblock (the
//          hook point for alignment/endian fixes)
//   len  - length of key in bytes
//   seed - initial value for both state words
//   out  - receives the 128-bit result (must point to 16 writable bytes)
void MurmurHash3_x64_128 ( const void * key, const int len,
                           const uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;

  uint64_t h1 = seed;
  uint64_t h2 = seed;

  // Per-block mixing constants.
  uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
  uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

  //----------
  // body

  // Unlike the x86 variants, blocks are walked forward from the start.
  const uint64_t * blocks = (const uint64_t *)(data);

  for(int i = 0; i < nblocks; i++) {
    uint64_t k1 = getblock(blocks,i*2+0);
    uint64_t k2 = getblock(blocks,i*2+1);

    k1 *= c1;
    k1 = ROTL64(k1,31);
    k1 *= c2;
    h1 ^= k1;

    h1 = ROTL64(h1,27);
    h1 += h2;
    h1 = h1*5+0x52dce729;

    k2 *= c2;
    k2 = ROTL64(k2,33);
    k2 *= c1;
    h2 ^= k2;

    h2 = ROTL64(h2,31);
    h2 += h1;
    h2 = h2*5+0x38495ab5;
  }

  //----------
  // tail

  // Mix in the trailing 1-15 bytes; the switch intentionally falls through.
  // Bytes are widened to uint64_t before shifting, which avoids shifting
  // into the sign bit of a promoted int.
  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint64_t k1 = 0;
  uint64_t k2 = 0;

  switch(len & 15) {
  case 15:
    k2 ^= uint64_t(tail[14]) << 48;
    /* fallthrough */
  case 14:
    k2 ^= uint64_t(tail[13]) << 40;
    /* fallthrough */
  case 13:
    k2 ^= uint64_t(tail[12]) << 32;
    /* fallthrough */
  case 12:
    k2 ^= uint64_t(tail[11]) << 24;
    /* fallthrough */
  case 11:
    k2 ^= uint64_t(tail[10]) << 16;
    /* fallthrough */
  case 10:
    k2 ^= uint64_t(tail[ 9]) << 8;
    /* fallthrough */
  case 9:
    k2 ^= uint64_t(tail[ 8]) << 0;
    k2 *= c2;
    k2 = ROTL64(k2,33);
    k2 *= c1;
    h2 ^= k2;
    /* fallthrough */
  case 8:
    k1 ^= uint64_t(tail[ 7]) << 56;
    /* fallthrough */
  case 7:
    k1 ^= uint64_t(tail[ 6]) << 48;
    /* fallthrough */
  case 6:
    k1 ^= uint64_t(tail[ 5]) << 40;
    /* fallthrough */
  case 5:
    k1 ^= uint64_t(tail[ 4]) << 32;
    /* fallthrough */
  case 4:
    k1 ^= uint64_t(tail[ 3]) << 24;
    /* fallthrough */
  case 3:
    k1 ^= uint64_t(tail[ 2]) << 16;
    /* fallthrough */
  case 2:
    k1 ^= uint64_t(tail[ 1]) << 8;
    /* fallthrough */
  case 1:
    k1 ^= uint64_t(tail[ 0]) << 0;
    k1 *= c1;
    k1 = ROTL64(k1,31);
    k1 *= c2;
    h1 ^= k1;
  };

  //----------
  // finalization

  // Fold in the length, cross-mix the two states, avalanche each one,
  // then cross-mix again so both output words depend on every input bit.
  h1 ^= len;
  h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = fmix(h1);
  h2 = fmix(h2);

  h1 += h2;
  h2 += h1;

  ((uint64_t*)out)[0] = h1;
  ((uint64_t*)out)[1] = h2;
}
00423
00424
00425