/*
 * Taken from http://burtleburtle.net/bob/c/lookup3.c
 * $FreeBSD$
 */

#include <sys/hash.h>
#include <machine/endian.h>

/*
-------------------------------------------------------------------------------
lookup3.c, by Bob Jenkins, May 2006, Public Domain.

These are functions for producing 32-bit hashes for hash table lookup.
hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
are externally useful functions.  Routines to test the hash are included
if SELF_TEST is defined.  You can use this free for any purpose.  It's in
the public domain.  It has no warranty.

You probably want to use hashlittle().  hashlittle() and hashbig()
hash byte arrays.  hashlittle() is faster than hashbig() on
little-endian machines.  Intel and AMD are little-endian machines.
On second thought, you probably want hashlittle2(), which is identical to
hashlittle() except it returns two 32-bit hashes for the price of one.
You could implement hashbig2() if you wanted but I haven't bothered here.

If you want to find a hash of, say, exactly 7 integers, do
  a = i1;  b = i2;  c = i3;
  mix(a,b,c);
  a += i4; b += i5; c += i6;
  mix(a,b,c);
  a += i7;
  final(a,b,c);
then use c as the hash value.  If you have a variable length array of
4-byte integers to hash, use hashword().  If you have a byte array (like
a character string), use hashlittle().  If you have several byte arrays, or
a mix of things, see the comments above hashlittle().

Why is this so big?  I read 12 bytes at a time into 3 4-byte integers,
then mix those integers.  This is fast (you can do a lot more thorough
mixing with 12*3 instructions on 3 integers than you can with 3 instructions
on 1 byte), but shoehorning those bytes into integers efficiently is messy.
-------------------------------------------------------------------------------
*/
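/*
 * Note for this FreeBSD copy: the externally visible entry points defined
 * below are jenkins_hash() for byte arrays and jenkins_hash32() for arrays
 * of uint32_t (see the <sys/hash.h> include above).  A minimal usage
 * sketch, with illustrative variable names that are not part of this file:
 *
 *   uint32_t h  = jenkins_hash(buf, buflen, 0);       any byte buffer
 *   uint32_t hw = jenkins_hash32(words, nwords, 0);   array of uint32_t's
 *
 * Passing a previous result as the last argument chains several buffers
 * into one hash, as the comments below describe.
 */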

#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
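/* e.g. rot(0xABCDEF01, 8) == 0xCDEF01AB: a left rotate of a 32-bit value */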

/*
-------------------------------------------------------------------------------
mix -- mix 3 32-bit values reversibly.

This is reversible, so any information in (a,b,c) before mix() is
still in (a,b,c) after mix().

If four pairs of (a,b,c) inputs are run through mix(), or through
mix() in reverse, there are at least 32 bits of the output that
are sometimes the same for one pair and different for another pair.
This was tested for:
* pairs that differed by one bit, by two bits, in any combination
  of top bits of (a,b,c), or in any combination of bottom bits of
  (a,b,c).
* "differ" is defined as +, -, ^, or ~^.  For + and -, I transformed
  the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
  is commonly produced by subtraction) looks like a single 1-bit
  difference.
* the base values were pseudorandom, all zero but one bit set, or
  all zero plus a counter that starts at zero.

Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
satisfy this are
    4  6  8 16 19  4
    9 15  3 18 27 15
   14  9  3  7 17  3
Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
for "differ" defined as + with a one-bit base and a two-bit delta.  I
used http://burtleburtle.net/bob/hash/avalanche.html to choose
the operations, constants, and arrangements of the variables.

This does not achieve avalanche.  There are input bits of (a,b,c)
that fail to affect some output bits of (a,b,c), especially of a.  The
most thoroughly mixed value is c, but it doesn't really even achieve
avalanche in c.

This allows some parallelism.  Read-after-writes are good at doubling
the number of bits affected, so the goal of mixing pulls in the opposite
direction as the goal of parallelism.  I did what I could.  Rotates
seem to cost as much as shifts on every machine I could lay my hands
on, and rotates are much kinder to the top and bottom bits, so I used
rotates.
-------------------------------------------------------------------------------
*/
#define mix(a,b,c) \
{ \
  a -= c;  a ^= rot(c, 4);  c += b; \
  b -= a;  b ^= rot(a, 6);  a += c; \
  c -= b;  c ^= rot(b, 8);  b += a; \
  a -= c;  a ^= rot(c,16);  c += b; \
  b -= a;  b ^= rot(a,19);  a += c; \
  c -= b;  c ^= rot(b, 4);  b += a; \
}

/*
-------------------------------------------------------------------------------
final -- final mixing of 3 32-bit values (a,b,c) into c

Pairs of (a,b,c) values differing in only a few bits will usually
produce values of c that look totally different.  This was tested for
* pairs that differed by one bit, by two bits, in any combination
  of top bits of (a,b,c), or in any combination of bottom bits of
  (a,b,c).
* "differ" is defined as +, -, ^, or ~^.  For + and -, I transformed
  the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
  is commonly produced by subtraction) looks like a single 1-bit
  difference.
* the base values were pseudorandom, all zero but one bit set, or
  all zero plus a counter that starts at zero.

These constants passed:
 14 11 25 16 4 14 24
 12 14 25 16 4 14 24
and these came close:
  4  8 15 26 3 22 24
 10  8 15 26 3 22 24
 11  8 15 26 3 22 24
-------------------------------------------------------------------------------
*/
#define final(a,b,c) \
{ \
  c ^= b; c -= rot(b,14); \
  a ^= c; a -= rot(c,11); \
  b ^= a; b -= rot(a,25); \
  c ^= b; c -= rot(b,16); \
  a ^= c; a -= rot(c,4);  \
  b ^= a; b -= rot(a,14); \
  c ^= b; c -= rot(b,24); \
}

/*
--------------------------------------------------------------------
 This works on all machines.  To be useful, it requires
 -- that the key be an array of uint32_t's, and
 -- that the length be the number of uint32_t's in the key

 The function hashword() is identical to hashlittle() on little-endian
 machines, and identical to hashbig() on big-endian machines,
 except that the length has to be measured in uint32_ts rather than in
 bytes.  hashlittle() is more complicated than hashword() only because
 hashlittle() has to dance around fitting the key bytes into registers.
--------------------------------------------------------------------
*/
uint32_t jenkins_hash32(
const uint32_t *k,                   /* the key, an array of uint32_t values */
size_t          length,               /* the length of the key, in uint32_ts */
uint32_t        initval)         /* the previous hash, or an arbitrary value */
{
  uint32_t a,b,c;

  /* Set up the internal state */
  a = b = c = 0xdeadbeef + (((uint32_t)length)<<2) + initval;

  /*------------------------------------------------- handle most of the key */
  while (length > 3)
  {
    a += k[0];
    b += k[1];
    c += k[2];
    mix(a,b,c);
    length -= 3;
    k += 3;
  }

  /*------------------------------------------- handle the last 3 uint32_t's */
  switch(length)                     /* all the case statements fall through */
  {
  case 3 : c+=k[2];
  case 2 : b+=k[1];
  case 1 : a+=k[0];
    final(a,b,c);
  case 0:     /* case 0: nothing left to add */
    break;
  }
  /*------------------------------------------------------ report the result */
  return c;
}
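/*
 * A minimal usage sketch for jenkins_hash32() (variable names are
 * illustrative only).  A second array can be folded into the same hash by
 * passing the previous result back in as initval, the "previous hash"
 * mentioned in the parameter comment above:
 *
 *   uint32_t words[4] = { 1, 2, 3, 4 };
 *   uint32_t h;
 *
 *   h = jenkins_hash32(words, 4, 0);
 *   h = jenkins_hash32(more_words, n_more, h);
 */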

#if BYTE_ORDER == LITTLE_ENDIAN
/*
-------------------------------------------------------------------------------
hashlittle() -- hash a variable-length key into a 32-bit value
  k       : the key (the unaligned variable-length array of bytes)
  length  : the length of the key, counting by bytes
  initval : can be any 4-byte value
Returns a 32-bit value.  Every bit of the key affects every bit of
the return value.  Two keys differing by one or two bits will have
totally different hash values.

The best hash table sizes are powers of 2.  There is no need to do
mod a prime (mod is sooo slow!).  If you need less than 32 bits,
use a bitmask.  For example, if you need only 10 bits, do
  h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.

If you are hashing n strings (uint8_t **)k, do it like this:
  for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);

By Bob Jenkins, 2006.  bob_jenkins@burtleburtle.net.  You may use this
code any way you wish, private, educational, or commercial.  It's free.

Use for hash table lookup, or anything where one collision in 2^^32 is
acceptable.  Do NOT use for cryptographic purposes.
-------------------------------------------------------------------------------
*/
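/*
 * Aside (not part of the original comment): hashsize() and hashmask() used
 * in the example above are defined in Bob Jenkins' lookup3.c roughly as
 *   #define hashsize(n) ((uint32_t)1<<(n))
 *   #define hashmask(n) (hashsize(n)-1)
 * They are not defined in this file, so callers mask the result themselves.
 */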

uint32_t jenkins_hash( const void *key, size_t length, uint32_t initval)
{
  uint32_t a,b,c;                                          /* internal state */
  union { const void *ptr; size_t i; } u;     /* needed for Mac Powerbook G4 */

  /* Set up the internal state */
  a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;

  u.ptr = key;
  if ((u.i & 0x3) == 0) {
    const uint32_t *k = (const uint32_t *)key;         /* read 32-bit chunks */

    /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
    while (length > 12)
    {
      a += k[0];
      b += k[1];
      c += k[2];
      mix(a,b,c);
      length -= 12;
      k += 3;
    }

    /*----------------------------- handle the last (probably partial) block */
    /*
     * "k[2]&0xffffff" actually reads beyond the end of the string, but
     * then masks off the part it's not allowed to read.  Because the
     * string is aligned, the masked-off tail is in the same word as the
     * rest of the string.  Every machine with memory protection I've seen
     * does it on word boundaries, so is OK with this.  But VALGRIND will
     * still catch it and complain.  The masking trick does make the hash
     * noticeably faster for short strings (like English words).
     */

    switch(length)
    {
    case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
    case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
    case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
    case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
    case 8 : b+=k[1]; a+=k[0]; break;
    case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
    case 6 : b+=k[1]&0xffff; a+=k[0]; break;
    case 5 : b+=k[1]&0xff; a+=k[0]; break;
    case 4 : a+=k[0]; break;
    case 3 : a+=k[0]&0xffffff; break;
    case 2 : a+=k[0]&0xffff; break;
    case 1 : a+=k[0]&0xff; break;
    case 0 : return c;              /* zero length strings require no mixing */
    }

  } else if ((u.i & 0x1) == 0) {
    const uint16_t *k = (const uint16_t *)key;         /* read 16-bit chunks */
    const uint8_t  *k8;

    /*--------------- all but last block: aligned reads and different mixing */
    while (length > 12)
    {
      a += k[0] + (((uint32_t)k[1])<<16);
      b += k[2] + (((uint32_t)k[3])<<16);
      c += k[4] + (((uint32_t)k[5])<<16);
      mix(a,b,c);
      length -= 12;
      k += 6;
    }

    /*----------------------------- handle the last (probably partial) block */
    k8 = (const uint8_t *)k;
    switch(length)
    {
    case 12: c+=k[4]+(((uint32_t)k[5])<<16);
             b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 11: c+=((uint32_t)k8[10])<<16;     /* fall through */
    case 10: c+=k[4];
             b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 9 : c+=k8[8];                      /* fall through */
    case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 7 : b+=((uint32_t)k8[6])<<16;      /* fall through */
    case 6 : b+=k[2];
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 5 : b+=k8[4];                      /* fall through */
    case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 3 : a+=((uint32_t)k8[2])<<16;      /* fall through */
    case 2 : a+=k[0];
             break;
    case 1 : a+=k8[0];
             break;
    case 0 : return c;                     /* zero length requires no mixing */
    }

  } else {                        /* need to read the key one byte at a time */
    const uint8_t *k = (const uint8_t *)key;

    /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
    while (length > 12)
    {
      a += k[0];
      a += ((uint32_t)k[1])<<8;
      a += ((uint32_t)k[2])<<16;
      a += ((uint32_t)k[3])<<24;
      b += k[4];
      b += ((uint32_t)k[5])<<8;
      b += ((uint32_t)k[6])<<16;
      b += ((uint32_t)k[7])<<24;
      c += k[8];
      c += ((uint32_t)k[9])<<8;
      c += ((uint32_t)k[10])<<16;
      c += ((uint32_t)k[11])<<24;
      mix(a,b,c);
      length -= 12;
      k += 12;
    }

    /*-------------------------------- last block: affect all 32 bits of (c) */
    switch(length)                   /* all the case statements fall through */
    {
    case 12: c+=((uint32_t)k[11])<<24;
    case 11: c+=((uint32_t)k[10])<<16;
    case 10: c+=((uint32_t)k[9])<<8;
    case 9 : c+=k[8];
    case 8 : b+=((uint32_t)k[7])<<24;
    case 7 : b+=((uint32_t)k[6])<<16;
    case 6 : b+=((uint32_t)k[5])<<8;
    case 5 : b+=k[4];
    case 4 : a+=((uint32_t)k[3])<<24;
    case 3 : a+=((uint32_t)k[2])<<16;
    case 2 : a+=((uint32_t)k[1])<<8;
    case 1 : a+=k[0];
             break;
    case 0 : return c;
    }
  }

  final(a,b,c);
  return c;
}

#else /* !(BYTE_ORDER == LITTLE_ENDIAN) */

/*
 * hashbig():
 * This is the same as hashword() on big-endian machines.  It is different
 * from hashlittle() on all machines.  hashbig() takes advantage of
 * big-endian byte ordering.
 */
uint32_t jenkins_hash( const void *key, size_t length, uint32_t initval)
{
  uint32_t a,b,c;
  union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */

  /* Set up the internal state */
  a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;

  u.ptr = key;
  if ((u.i & 0x3) == 0) {
    const uint32_t *k = (const uint32_t *)key;         /* read 32-bit chunks */

    /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
    while (length > 12)
    {
      a += k[0];
      b += k[1];
      c += k[2];
      mix(a,b,c);
      length -= 12;
      k += 3;
    }

    /*----------------------------- handle the last (probably partial) block */
    /*
     * "k[2]&0xffffff00" actually reads beyond the end of the string, but
     * then masks off the part it's not allowed to read.  Because the
     * string is aligned, the masked-off tail is in the same word as the
     * rest of the string.  Every machine with memory protection I've seen
     * does it on word boundaries, so is OK with this.  But VALGRIND will
     * still catch it and complain.  The masking trick does make the hash
     * noticeably faster for short strings (like English words).
     */

    switch(length)
    {
    case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
    case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break;
    case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break;
    case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break;
    case 8 : b+=k[1]; a+=k[0]; break;
    case 7 : b+=k[1]&0xffffff00; a+=k[0]; break;
    case 6 : b+=k[1]&0xffff0000; a+=k[0]; break;
    case 5 : b+=k[1]&0xff000000; a+=k[0]; break;
    case 4 : a+=k[0]; break;
    case 3 : a+=k[0]&0xffffff00; break;
    case 2 : a+=k[0]&0xffff0000; break;
    case 1 : a+=k[0]&0xff000000; break;
    case 0 : return c;              /* zero length strings require no mixing */
    }

  } else {                        /* need to read the key one byte at a time */
    const uint8_t *k = (const uint8_t *)key;

    /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
    while (length > 12)
    {
      a += ((uint32_t)k[0])<<24;
      a += ((uint32_t)k[1])<<16;
      a += ((uint32_t)k[2])<<8;
      a += ((uint32_t)k[3]);
      b += ((uint32_t)k[4])<<24;
      b += ((uint32_t)k[5])<<16;
      b += ((uint32_t)k[6])<<8;
      b += ((uint32_t)k[7]);
      c += ((uint32_t)k[8])<<24;
      c += ((uint32_t)k[9])<<16;
      c += ((uint32_t)k[10])<<8;
      c += ((uint32_t)k[11]);
      mix(a,b,c);
      length -= 12;
      k += 12;
    }

    /*-------------------------------- last block: affect all 32 bits of (c) */
    switch(length)                   /* all the case statements fall through */
    {
    case 12: c+=k[11];
    case 11: c+=((uint32_t)k[10])<<8;
    case 10: c+=((uint32_t)k[9])<<16;
    case 9 : c+=((uint32_t)k[8])<<24;
    case 8 : b+=k[7];
    case 7 : b+=((uint32_t)k[6])<<8;
    case 6 : b+=((uint32_t)k[5])<<16;
    case 5 : b+=((uint32_t)k[4])<<24;
    case 4 : a+=k[3];
    case 3 : a+=((uint32_t)k[2])<<8;
    case 2 : a+=((uint32_t)k[1])<<16;
    case 1 : a+=((uint32_t)k[0])<<24;
             break;
    case 0 : return c;
    }
  }

  final(a,b,c);
  return c;
}
#endif