@@ -219,29 +219,30 @@ inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
   uint64_t b = fetch64(s + 8);
   uint64_t c = fetch64(s + len - 8) * k2;
   uint64_t d = fetch64(s + len - 16) * k0;
-  return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
-                       a + rotate(b ^ k3, 20) - c + len + seed);
+  return hash_16_bytes(llvm::rotr<uint64_t>(a - b, 43) +
+                           llvm::rotr<uint64_t>(c ^ seed, 30) + d,
+                       a + llvm::rotr<uint64_t>(b ^ k3, 20) - c + len + seed);
 }
 
 inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
   uint64_t z = fetch64(s + 24);
   uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
-  uint64_t b = rotate(a + z, 52);
-  uint64_t c = rotate(a, 37);
+  uint64_t b = llvm::rotr<uint64_t>(a + z, 52);
+  uint64_t c = llvm::rotr<uint64_t>(a, 37);
   a += fetch64(s + 8);
-  c += rotate(a, 7);
+  c += llvm::rotr<uint64_t>(a, 7);
   a += fetch64(s + 16);
   uint64_t vf = a + z;
-  uint64_t vs = b + rotate(a, 31) + c;
+  uint64_t vs = b + llvm::rotr<uint64_t>(a, 31) + c;
   a = fetch64(s + 16) + fetch64(s + len - 32);
   z = fetch64(s + len - 8);
-  b = rotate(a + z, 52);
-  c = rotate(a, 37);
+  b = llvm::rotr<uint64_t>(a + z, 52);
+  c = llvm::rotr<uint64_t>(a, 37);
   a += fetch64(s + len - 24);
-  c += rotate(a, 7);
+  c += llvm::rotr<uint64_t>(a, 7);
   a += fetch64(s + len - 16);
   uint64_t wf = a + z;
-  uint64_t ws = b + rotate(a, 31) + c;
+  uint64_t ws = b + llvm::rotr<uint64_t>(a, 31) + c;
   uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
   return shift_mix((seed ^ (r * k0)) + vs) * k2;
 }
@@ -271,9 +272,13 @@ struct hash_state {
   /// seed and the first 64-byte chunk.
   /// This effectively performs the initial mix.
   static hash_state create(const char *s, uint64_t seed) {
-    hash_state state = {
-      0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
-      seed * k1, shift_mix(seed), 0 };
+    hash_state state = {0,
+                        seed,
+                        hash_16_bytes(seed, k1),
+                        llvm::rotr<uint64_t>(seed ^ k1, 49),
+                        seed * k1,
+                        shift_mix(seed),
+                        0};
     state.h6 = hash_16_bytes(state.h4, state.h5);
     state.mix(s);
     return state;
@@ -284,22 +289,22 @@ struct hash_state {
   static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
     a += fetch64(s);
     uint64_t c = fetch64(s + 24);
-    b = rotate(b + a + c, 21);
+    b = llvm::rotr<uint64_t>(b + a + c, 21);
     uint64_t d = a;
     a += fetch64(s + 8) + fetch64(s + 16);
-    b += rotate(a, 44) + d;
+    b += llvm::rotr<uint64_t>(a, 44) + d;
     a += c;
   }
 
   /// Mix in a 64-byte buffer of data.
   /// We mix all 64 bytes even when the chunk length is smaller, but we
   /// record the actual length.
   void mix(const char *s) {
-    h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
-    h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
+    h0 = llvm::rotr<uint64_t>(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
+    h1 = llvm::rotr<uint64_t>(h1 + h4 + fetch64(s + 48), 42) * k1;
     h0 ^= h6;
     h1 += h3 + fetch64(s + 40);
-    h2 = rotate(h2 + h5, 33) * k1;
+    h2 = llvm::rotr<uint64_t>(h2 + h5, 33) * k1;
     h3 = h4 * k1;
     h4 = h0 + h5;
     mix_32_bytes(s, h3, h4);
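
Note on the change: the local rotate() helper that this patch replaces is a plain bitwise right rotation (guarded against shifting by 64), so llvm::rotr<uint64_t> from llvm/ADT/bit.h is a behavior-preserving drop-in. A minimal standalone sketch checking that equivalence; old_rotate below is a reconstruction for illustration and is not part of the patch:

#include "llvm/ADT/bit.h"
#include <cassert>
#include <cstdint>

// Reconstruction of the removed helper: right rotate, skipping the shift
// when the amount is 0 so we never shift a uint64_t by 64 (undefined behavior).
static uint64_t old_rotate(uint64_t val, unsigned shift) {
  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}

int main() {
  const uint64_t v = 0x0123456789abcdefULL;
  // Spot-check a few rotation amounts, including the edge cases 0 and 63.
  for (unsigned s : {0u, 7u, 21u, 33u, 43u, 63u})
    assert(old_rotate(v, s) == llvm::rotr<uint64_t>(v, s));
  return 0;
}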