/*
 * IDI,NTNU
 *
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (C) 2009, 2010, Jorn Amundsen <jorn.amundsen@ntnu.no>
 * Tweaked Edon-R implementation for SUPERCOP, based on NIST API.
 *
 */
/*
 * Portions copyright (c) 2013, Saso Kiselkov, All rights reserved
 * Copyright 2016 Gary Mills
 */

/* determine where we can get bcopy/bzero declarations */
#ifdef	_KERNEL
#include <sys/systm.h>
#else
#include <strings.h>
#endif
#include <sys/edonr.h>
#include <sys/debug.h>

/* big-endian support; provides no-ops when run on little-endian hosts */
#include "edonr_byteorder.h"

#define	hashState224(x)	((x)->pipe->p256)
#define	hashState256(x)	((x)->pipe->p256)
#define	hashState384(x)	((x)->pipe->p512)
#define	hashState512(x)	((x)->pipe->p512)

/* shift and rotate shortcuts */
#define	shl(x, n)	((x) << (n))
#define	shr(x, n)	((x) >> (n))

#define	rotl32(x, n)	(((x) << (n)) | ((x) >> (32 - (n))))
#define	rotr32(x, n)	(((x) >> (n)) | ((x) << (32 - (n))))

#define	rotl64(x, n)	(((x) << (n)) | ((x) >> (64 - (n))))
#define	rotr64(x, n)	(((x) >> (n)) | ((x) << (64 - (n))))
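/*
 * For example, rotl32(0x80000001, 1) == 0x00000003 and
 * rotr32(0x00000003, 1) == 0x80000001.  The rotation count must stay in
 * 1..31 (1..63 for the 64-bit forms); a count of 0 would shift by the full
 * word width, which is undefined behaviour in C.
 */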

#if !defined(__C99_RESTRICT)
#define	restrict	/* restrict */
#endif

#define	EDONR_VALID_HASHBITLEN(x) \
	((x) == 512 || (x) == 384 || (x) == 256 || (x) == 224)

/* EdonR224 initial double chaining pipe */
static const uint32_t i224p2[16] = {
	0x00010203ul, 0x04050607ul, 0x08090a0bul, 0x0c0d0e0ful,
	0x10111213ul, 0x14151617ul, 0x18191a1bul, 0x1c1d1e1ful,
	0x20212223ul, 0x24252627ul, 0x28292a2bul, 0x2c2d2e2ful,
	0x30313233ul, 0x34353637ul, 0x38393a3bul, 0x3c3d3e3ful,
};

/* EdonR256 initial double chaining pipe */
static const uint32_t i256p2[16] = {
	0x40414243ul, 0x44454647ul, 0x48494a4bul, 0x4c4d4e4ful,
	0x50515253ul, 0x54555657ul, 0x58595a5bul, 0x5c5d5e5ful,
	0x60616263ul, 0x64656667ul, 0x68696a6bul, 0x6c6d6e6ful,
	0x70717273ul, 0x74757677ul, 0x78797a7bul, 0x7c7d7e7ful,
};

/* EdonR384 initial double chaining pipe */
static const uint64_t i384p2[16] = {
	0x0001020304050607ull, 0x08090a0b0c0d0e0full,
	0x1011121314151617ull, 0x18191a1b1c1d1e1full,
	0x2021222324252627ull, 0x28292a2b2c2d2e2full,
	0x3031323334353637ull, 0x38393a3b3c3d3e3full,
	0x4041424344454647ull, 0x48494a4b4c4d4e4full,
	0x5051525354555657ull, 0x58595a5b5c5d5e5full,
	0x6061626364656667ull, 0x68696a6b6c6d6e6full,
	0x7071727374757677ull, 0x78797a7b7c7d7e7full
};

/* EdonR512 initial double chaining pipe */
static const uint64_t i512p2[16] = {
	0x8081828384858687ull, 0x88898a8b8c8d8e8full,
	0x9091929394959697ull, 0x98999a9b9c9d9e9full,
	0xa0a1a2a3a4a5a6a7ull, 0xa8a9aaabacadaeafull,
	0xb0b1b2b3b4b5b6b7ull, 0xb8b9babbbcbdbebfull,
	0xc0c1c2c3c4c5c6c7ull, 0xc8c9cacbcccdcecfull,
	0xd0d1d2d3d4d5d6d7ull, 0xd8d9dadbdcdddedfull,
	0xe0e1e2e3e4e5e6e7ull, 0xe8e9eaebecedeeefull,
	0xf0f1f2f3f4f5f6f7ull, 0xf8f9fafbfcfdfeffull
};
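/*
 * Taken together, the four tables above are simply the consecutive byte
 * values 0x00..0xff packed big-endian into words: 0x00..0x3f (224),
 * 0x40..0x7f (256), 0x00..0x7f again as 64-bit words (384), and
 * 0x80..0xff (512).
 */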

/*
 * First Latin Square
 * 0   7   1   3   2   4   6   5
 * 4   1   7   6   3   0   5   2
 * 7   0   4   2   5   3   1   6
 * 1   4   0   5   6   2   7   3
 * 2   3   6   7   1   5   0   4
 * 5   2   3   1   7   6   4   0
 * 3   6   5   0   4   7   2   1
 * 6   5   2   4   0   1   3   7
 */
#define	LS1_256(c, x0, x1, x2, x3, x4, x5, x6, x7)			\
{									\
	uint32_t x04, x17, x23, x56, x07, x26;				\
	x04 = x0+x4, x17 = x1+x7, x07 = x04+x17;			\
	s0 = c + x07 + x2;						\
	s1 = rotl32(x07 + x3, 4);					\
	s2 = rotl32(x07 + x6, 8);					\
	x23 = x2 + x3;							\
	s5 = rotl32(x04 + x23 + x5, 22);				\
	x56 = x5 + x6;							\
	s6 = rotl32(x17 + x56 + x0, 24);				\
	x26 = x23+x56;							\
	s3 = rotl32(x26 + x7, 13);					\
	s4 = rotl32(x26 + x1, 17);					\
	s7 = rotl32(x26 + x4, 29);					\
}

#define	LS1_512(c, x0, x1, x2, x3, x4, x5, x6, x7)			\
{									\
	uint64_t x04, x17, x23, x56, x07, x26;				\
	x04 = x0+x4, x17 = x1+x7, x07 = x04+x17;			\
	s0 = c + x07 + x2;						\
	s1 = rotl64(x07 + x3, 5);					\
	s2 = rotl64(x07 + x6, 15);					\
	x23 = x2 + x3;							\
	s5 = rotl64(x04 + x23 + x5, 40);				\
	x56 = x5 + x6;							\
	s6 = rotl64(x17 + x56 + x0, 50);				\
	x26 = x23+x56;							\
	s3 = rotl64(x26 + x7, 22);					\
	s4 = rotl64(x26 + x1, 31);					\
	s7 = rotl64(x26 + x4, 59);					\
}
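/*
 * Both LS1 variants compute the eight working words s0..s7 from their eight
 * inputs, following the first Latin square above; s0..s7 must be declared
 * (at the matching word width) in the scope that invokes the macro, as
 * Q256() and Q512() do below.
 */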

/*
 * Second Orthogonal Latin Square
 * 0   4   2   3   1   6   5   7
 * 7   6   3   2   5   4   1   0
 * 5   3   1   6   0   2   7   4
 * 1   0   5   4   3   7   2   6
 * 2   1   0   7   4   5   6   3
 * 3   5   7   0   6   1   4   2
 * 4   7   6   1   2   0   3   5
 * 6   2   4   5   7   3   0   1
 */
#define	LS2_256(c, y0, y1, y2, y3, y4, y5, y6, y7)			\
{									\
	uint32_t y01, y25, y34, y67, y04, y05, y27, y37;		\
	y01 = y0+y1, y25 = y2+y5, y05 = y01+y25;			\
	t0  = ~c + y05 + y7;						\
	t2 = rotl32(y05 + y3, 9);					\
	y34 = y3+y4, y04 = y01+y34;					\
	t1 = rotl32(y04 + y6, 5);					\
	t4 = rotl32(y04 + y5, 15);					\
	y67 = y6+y7, y37 = y34+y67;					\
	t3 = rotl32(y37 + y2, 11);					\
	t7 = rotl32(y37 + y0, 27);					\
	y27 = y25+y67;							\
	t5 = rotl32(y27 + y4, 20);					\
	t6 = rotl32(y27 + y1, 25);					\
}

#define	LS2_512(c, y0, y1, y2, y3, y4, y5, y6, y7)			\
{									\
	uint64_t y01, y25, y34, y67, y04, y05, y27, y37;		\
	y01 = y0+y1, y25 = y2+y5, y05 = y01+y25;			\
	t0  = ~c + y05 + y7;						\
	t2 = rotl64(y05 + y3, 19);					\
	y34 = y3+y4, y04 = y01+y34;					\
	t1 = rotl64(y04 + y6, 10);					\
	t4 = rotl64(y04 + y5, 36);					\
	y67 = y6+y7, y37 = y34+y67;					\
	t3 = rotl64(y37 + y2, 29);					\
	t7 = rotl64(y37 + y0, 55);					\
	y27 = y25+y67;							\
	t5 = rotl64(y27 + y4, 44);					\
	t6 = rotl64(y27 + y1, 48);					\
}
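/*
 * Likewise, LS2_256 and LS2_512 fill in t0..t7 for the caller, following the
 * second orthogonal Latin square; note that the constant enters this half
 * complemented (~c).
 */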

#define	quasi_exform256(r0, r1, r2, r3, r4, r5, r6, r7)			\
{									\
	uint32_t s04, s17, s23, s56, t01, t25, t34, t67;		\
	s04 = s0 ^ s4, t01 = t0 ^ t1;					\
	r0 = (s04 ^ s1) + (t01 ^ t5);					\
	t67 = t6 ^ t7;							\
	r1 = (s04 ^ s7) + (t2 ^ t67);					\
	s23 = s2 ^ s3;							\
	r7 = (s23 ^ s5) + (t4 ^ t67);					\
	t34 = t3 ^ t4;							\
	r3 = (s23 ^ s4) + (t0 ^ t34);					\
	s56 = s5 ^ s6;							\
	r5 = (s3 ^ s56) + (t34 ^ t6);					\
	t25 = t2 ^ t5;							\
	r6 = (s2 ^ s56) + (t25 ^ t7);					\
	s17 = s1 ^ s7;							\
	r4 = (s0 ^ s17) + (t1 ^ t25);					\
	r2 = (s17 ^ s6) + (t01 ^ t3);					\
}

#define	quasi_exform512(r0, r1, r2, r3, r4, r5, r6, r7)			\
{									\
	uint64_t s04, s17, s23, s56, t01, t25, t34, t67;		\
	s04 = s0 ^ s4, t01 = t0 ^ t1;					\
	r0 = (s04 ^ s1) + (t01 ^ t5);					\
	t67 = t6 ^ t7;							\
	r1 = (s04 ^ s7) + (t2 ^ t67);					\
	s23 = s2 ^ s3;							\
	r7 = (s23 ^ s5) + (t4 ^ t67);					\
	t34 = t3 ^ t4;							\
	r3 = (s23 ^ s4) + (t0 ^ t34);					\
	s56 = s5 ^ s6;							\
	r5 = (s3 ^ s56) + (t34 ^ t6);					\
	t25 = t2 ^ t5;							\
	r6 = (s2 ^ s56) + (t25 ^ t7);					\
	s17 = s1 ^ s7;							\
	r4 = (s0 ^ s17) + (t1 ^ t25);					\
	r2 = (s17 ^ s6) + (t01 ^ t3);					\
}
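/*
 * The quasi_exform macros combine the two Latin-square halves: each result
 * word rN is a three-way XOR of s-words added to a three-way XOR of t-words,
 * completing one quasigroup e-transformation of the compression function.
 */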

static size_t
Q256(size_t bitlen, const uint32_t *data, uint32_t *restrict p)
{
	size_t bl;

	for (bl = bitlen; bl >= EdonR256_BLOCK_BITSIZE;
	    bl -= EdonR256_BLOCK_BITSIZE, data += 16) {
		uint32_t s0, s1, s2, s3, s4, s5, s6, s7, t0, t1, t2, t3, t4,
		    t5, t6, t7;
		uint32_t p0, p1, p2, p3, p4, p5, p6, p7, q0, q1, q2, q3, q4,
		    q5, q6, q7;
		const uint32_t defix = 0xaaaaaaaa;
#if defined(MACHINE_IS_BIG_ENDIAN)
		uint32_t swp0, swp1, swp2, swp3, swp4, swp5, swp6, swp7, swp8,
		    swp9, swp10, swp11, swp12, swp13, swp14, swp15;
#define	d(j)	swp ## j
#define	s32(j)	ld_swap32((uint32_t *)data + j, swp ## j)
#else
#define	d(j)	data[j]
#endif

		/* First row of quasigroup e-transformations */
#if defined(MACHINE_IS_BIG_ENDIAN)
		s32(8);
		s32(9);
		s32(10);
		s32(11);
		s32(12);
		s32(13);
		s32(14);
		s32(15);
#endif
		LS1_256(defix, d(15), d(14), d(13), d(12), d(11), d(10), d(9),
		    d(8));
#if defined(MACHINE_IS_BIG_ENDIAN)
		s32(0);
		s32(1);
		s32(2);
		s32(3);
		s32(4);
		s32(5);
		s32(6);
		s32(7);
#undef s32
#endif
		LS2_256(defix, d(0), d(1), d(2), d(3), d(4), d(5), d(6), d(7));
		quasi_exform256(p0, p1, p2, p3, p4, p5, p6, p7);

		LS1_256(defix, p0, p1, p2, p3, p4, p5, p6, p7);
		LS2_256(defix, d(8), d(9), d(10), d(11), d(12), d(13), d(14),
		    d(15));
		quasi_exform256(q0, q1, q2, q3, q4, q5, q6, q7);

		/* Second row of quasigroup e-transformations */
		LS1_256(defix, p[8], p[9], p[10], p[11], p[12], p[13], p[14],
		    p[15]);
		LS2_256(defix, p0, p1, p2, p3, p4, p5, p6, p7);
		quasi_exform256(p0, p1, p2, p3, p4, p5, p6, p7);

		LS1_256(defix, p0, p1, p2, p3, p4, p5, p6, p7);
		LS2_256(defix, q0, q1, q2, q3, q4, q5, q6, q7);
		quasi_exform256(q0, q1, q2, q3, q4, q5, q6, q7);

		/* Third row of quasigroup e-transformations */
		LS1_256(defix, p0, p1, p2, p3, p4, p5, p6, p7);
		LS2_256(defix, p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
		quasi_exform256(p0, p1, p2, p3, p4, p5, p6, p7);

		LS1_256(defix, q0, q1, q2, q3, q4, q5, q6, q7);
		LS2_256(defix, p0, p1, p2, p3, p4, p5, p6, p7);
		quasi_exform256(q0, q1, q2, q3, q4, q5, q6, q7);

		/* Fourth row of quasigroup e-transformations */
		LS1_256(defix, d(7), d(6), d(5), d(4), d(3), d(2), d(1), d(0));
		LS2_256(defix, p0, p1, p2, p3, p4, p5, p6, p7);
		quasi_exform256(p0, p1, p2, p3, p4, p5, p6, p7);

		LS1_256(defix, p0, p1, p2, p3, p4, p5, p6, p7);
		LS2_256(defix, q0, q1, q2, q3, q4, q5, q6, q7);
		quasi_exform256(q0, q1, q2, q3, q4, q5, q6, q7);

		/* Edon-R tweak on the original SHA-3 Edon-R submission. */
		p[0] ^= d(8) ^ p0;
		p[1] ^= d(9) ^ p1;
		p[2] ^= d(10) ^ p2;
		p[3] ^= d(11) ^ p3;
		p[4] ^= d(12) ^ p4;
		p[5] ^= d(13) ^ p5;
		p[6] ^= d(14) ^ p6;
		p[7] ^= d(15) ^ p7;
		p[8] ^= d(0) ^ q0;
		p[9] ^= d(1) ^ q1;
		p[10] ^= d(2) ^ q2;
		p[11] ^= d(3) ^ q3;
		p[12] ^= d(4) ^ q4;
		p[13] ^= d(5) ^ q5;
		p[14] ^= d(6) ^ q6;
		p[15] ^= d(7) ^ q7;
	}

#undef d
	return (bitlen - bl);
}
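/*
 * Q256 absorbs only whole EdonR256_BLOCK_BITSIZE-bit blocks into the double
 * pipe p[]; the return value is the number of input bits actually consumed,
 * so EdonRUpdate() can carry any remainder over in LastPart.
 */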

#if defined(__IBMC__) && defined(_AIX) && defined(__64BIT__)
static inline size_t
#else
static size_t
#endif
Q512(size_t bitlen, const uint64_t *data, uint64_t *restrict p)
{
	size_t bl;

	for (bl = bitlen; bl >= EdonR512_BLOCK_BITSIZE;
	    bl -= EdonR512_BLOCK_BITSIZE, data += 16) {
		uint64_t s0, s1, s2, s3, s4, s5, s6, s7, t0, t1, t2, t3, t4,
		    t5, t6, t7;
		uint64_t p0, p1, p2, p3, p4, p5, p6, p7, q0, q1, q2, q3, q4,
		    q5, q6, q7;
		const uint64_t defix = 0xaaaaaaaaaaaaaaaaull;
#if defined(MACHINE_IS_BIG_ENDIAN)
		uint64_t swp0, swp1, swp2, swp3, swp4, swp5, swp6, swp7, swp8,
		    swp9, swp10, swp11, swp12, swp13, swp14, swp15;
#define	d(j)	swp##j
#define	s64(j)	ld_swap64((uint64_t *)data+j, swp##j)
#else
#define	d(j)	data[j]
#endif

		/* First row of quasigroup e-transformations */
#if defined(MACHINE_IS_BIG_ENDIAN)
		s64(8);
		s64(9);
		s64(10);
		s64(11);
		s64(12);
		s64(13);
		s64(14);
		s64(15);
#endif
		LS1_512(defix, d(15), d(14), d(13), d(12), d(11), d(10), d(9),
		    d(8));
#if defined(MACHINE_IS_BIG_ENDIAN)
		s64(0);
		s64(1);
		s64(2);
		s64(3);
		s64(4);
		s64(5);
		s64(6);
		s64(7);
#undef s64
#endif
		LS2_512(defix, d(0), d(1), d(2), d(3), d(4), d(5), d(6), d(7));
		quasi_exform512(p0, p1, p2, p3, p4, p5, p6, p7);

		LS1_512(defix, p0, p1, p2, p3, p4, p5, p6, p7);
		LS2_512(defix, d(8), d(9), d(10), d(11), d(12), d(13), d(14),
		    d(15));
		quasi_exform512(q0, q1, q2, q3, q4, q5, q6, q7);

		/* Second row of quasigroup e-transformations */
		LS1_512(defix, p[8], p[9], p[10], p[11], p[12], p[13], p[14],
		    p[15]);
		LS2_512(defix, p0, p1, p2, p3, p4, p5, p6, p7);
		quasi_exform512(p0, p1, p2, p3, p4, p5, p6, p7);

		LS1_512(defix, p0, p1, p2, p3, p4, p5, p6, p7);
		LS2_512(defix, q0, q1, q2, q3, q4, q5, q6, q7);
		quasi_exform512(q0, q1, q2, q3, q4, q5, q6, q7);

		/* Third row of quasigroup e-transformations */
		LS1_512(defix, p0, p1, p2, p3, p4, p5, p6, p7);
		LS2_512(defix, p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
		quasi_exform512(p0, p1, p2, p3, p4, p5, p6, p7);

		LS1_512(defix, q0, q1, q2, q3, q4, q5, q6, q7);
		LS2_512(defix, p0, p1, p2, p3, p4, p5, p6, p7);
		quasi_exform512(q0, q1, q2, q3, q4, q5, q6, q7);

		/* Fourth row of quasigroup e-transformations */
		LS1_512(defix, d(7), d(6), d(5), d(4), d(3), d(2), d(1), d(0));
		LS2_512(defix, p0, p1, p2, p3, p4, p5, p6, p7);
		quasi_exform512(p0, p1, p2, p3, p4, p5, p6, p7);

		LS1_512(defix, p0, p1, p2, p3, p4, p5, p6, p7);
		LS2_512(defix, q0, q1, q2, q3, q4, q5, q6, q7);
		quasi_exform512(q0, q1, q2, q3, q4, q5, q6, q7);

		/* Edon-R tweak on the original SHA-3 Edon-R submission. */
		p[0] ^= d(8) ^ p0;
		p[1] ^= d(9) ^ p1;
		p[2] ^= d(10) ^ p2;
		p[3] ^= d(11) ^ p3;
		p[4] ^= d(12) ^ p4;
		p[5] ^= d(13) ^ p5;
		p[6] ^= d(14) ^ p6;
		p[7] ^= d(15) ^ p7;
		p[8] ^= d(0) ^ q0;
		p[9] ^= d(1) ^ q1;
		p[10] ^= d(2) ^ q2;
		p[11] ^= d(3) ^ q3;
		p[12] ^= d(4) ^ q4;
		p[13] ^= d(5) ^ q5;
		p[14] ^= d(6) ^ q6;
		p[15] ^= d(7) ^ q7;
	}

#undef d
	return (bitlen - bl);
}
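/*
 * Q512 is the 64-bit analogue of Q256: the same schedule of Latin-square and
 * quasigroup steps, but on EdonR512_BLOCK_BITSIZE-bit blocks with the 64-bit
 * rotation constants.
 */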

void
EdonRInit(EdonRState *state, size_t hashbitlen)
{
	ASSERT(EDONR_VALID_HASHBITLEN(hashbitlen));
	switch (hashbitlen) {
	case 224:
		state->hashbitlen = 224;
		state->bits_processed = 0;
		state->unprocessed_bits = 0;
		bcopy(i224p2, hashState224(state)->DoublePipe,
		    16 * sizeof (uint32_t));
		break;

	case 256:
		state->hashbitlen = 256;
		state->bits_processed = 0;
		state->unprocessed_bits = 0;
		bcopy(i256p2, hashState256(state)->DoublePipe,
		    16 * sizeof (uint32_t));
		break;

	case 384:
		state->hashbitlen = 384;
		state->bits_processed = 0;
		state->unprocessed_bits = 0;
		bcopy(i384p2, hashState384(state)->DoublePipe,
		    16 * sizeof (uint64_t));
		break;

	case 512:
		state->hashbitlen = 512;
		state->bits_processed = 0;
		state->unprocessed_bits = 0;
		bcopy(i512p2, hashState512(state)->DoublePipe,
		    16 * sizeof (uint64_t));
		break;
	}
}


void
EdonRUpdate(EdonRState *state, const uint8_t *data, size_t databitlen)
{
	uint32_t *data32;
	uint64_t *data64;

	size_t bits_processed;

	ASSERT(EDONR_VALID_HASHBITLEN(state->hashbitlen));
	switch (state->hashbitlen) {
	case 224:
	case 256:
		if (state->unprocessed_bits > 0) {
			/* LastBytes = databitlen / 8 */
			int LastBytes = (int)databitlen >> 3;

			ASSERT(state->unprocessed_bits + databitlen <=
			    EdonR256_BLOCK_SIZE * 8);

			bcopy(data, hashState256(state)->LastPart
			    + (state->unprocessed_bits >> 3), LastBytes);
			state->unprocessed_bits += (int)databitlen;
			databitlen = state->unprocessed_bits;
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			data32 = (uint32_t *)hashState256(state)->LastPart;
		} else
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			data32 = (uint32_t *)data;

		bits_processed = Q256(databitlen, data32,
		    hashState256(state)->DoublePipe);
		state->bits_processed += bits_processed;
		databitlen -= bits_processed;
		state->unprocessed_bits = (int)databitlen;
		if (databitlen > 0) {
			/* LastBytes = Ceil(databitlen / 8) */
			int LastBytes =
			    ((~(((-(int)databitlen) >> 3) & 0x01ff)) +
			    1) & 0x01ff;
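			/*
			 * The expression above is a two's-complement way of
			 * rounding up: (-databitlen) >> 3 is
			 * -Ceil(databitlen / 8), and negating it within the
			 * 9-bit mask yields the byte count rounded up.  The
			 * 512-bit case below does the same with a 10-bit mask.
			 */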

			data32 += bits_processed >> 5;	/* byte size update */
			bcopy(data32, hashState256(state)->LastPart, LastBytes);
		}
		break;

	case 384:
	case 512:
		if (state->unprocessed_bits > 0) {
			/* LastBytes = databitlen / 8 */
			int LastBytes = (int)databitlen >> 3;

			ASSERT(state->unprocessed_bits + databitlen <=
			    EdonR512_BLOCK_SIZE * 8);

			bcopy(data, hashState512(state)->LastPart
			    + (state->unprocessed_bits >> 3), LastBytes);
			state->unprocessed_bits += (int)databitlen;
			databitlen = state->unprocessed_bits;
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			data64 = (uint64_t *)hashState512(state)->LastPart;
		} else
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			data64 = (uint64_t *)data;

		bits_processed = Q512(databitlen, data64,
		    hashState512(state)->DoublePipe);
		state->bits_processed += bits_processed;
		databitlen -= bits_processed;
		state->unprocessed_bits = (int)databitlen;
		if (databitlen > 0) {
			/* LastBytes = Ceil(databitlen / 8) */
			int LastBytes =
			    ((~(((-(int)databitlen) >> 3) & 0x03ff)) +
			    1) & 0x03ff;

			data64 += bits_processed >> 6;	/* byte size update */
			bcopy(data64, hashState512(state)->LastPart, LastBytes);
		}
		break;
	}
}

void
EdonRFinal(EdonRState *state, uint8_t *hashval)
{
	uint32_t *data32;
	uint64_t *data64, num_bits;

	size_t databitlen;
	int LastByte, PadOnePosition;

	num_bits = state->bits_processed + state->unprocessed_bits;
	ASSERT(EDONR_VALID_HASHBITLEN(state->hashbitlen));
	switch (state->hashbitlen) {
	case 224:
	case 256:
		LastByte = (int)state->unprocessed_bits >> 3;
		PadOnePosition = 7 - (state->unprocessed_bits & 0x07);
		hashState256(state)->LastPart[LastByte] =
		    (hashState256(state)->LastPart[LastByte]
		    & (0xff << (PadOnePosition + 1))) ^
		    (0x01 << PadOnePosition);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		data64 = (uint64_t *)hashState256(state)->LastPart;

		if (state->unprocessed_bits < 448) {
			(void) memset((hashState256(state)->LastPart) +
			    LastByte + 1, 0x00,
			    EdonR256_BLOCK_SIZE - LastByte - 9);
			databitlen = EdonR256_BLOCK_SIZE * 8;
#if defined(MACHINE_IS_BIG_ENDIAN)
			st_swap64(num_bits, data64 + 7);
#else
			data64[7] = num_bits;
#endif
		} else {
			(void) memset((hashState256(state)->LastPart) +
			    LastByte + 1, 0x00,
			    EdonR256_BLOCK_SIZE * 2 - LastByte - 9);
			databitlen = EdonR256_BLOCK_SIZE * 16;
#if defined(MACHINE_IS_BIG_ENDIAN)
			st_swap64(num_bits, data64 + 15);
#else
			data64[15] = num_bits;
#endif
		}

		/* LINTED E_BAD_PTR_CAST_ALIGN */
		data32 = (uint32_t *)hashState256(state)->LastPart;
		state->bits_processed += Q256(databitlen, data32,
		    hashState256(state)->DoublePipe);
		break;

	case 384:
	case 512:
		LastByte = (int)state->unprocessed_bits >> 3;
		PadOnePosition = 7 - (state->unprocessed_bits & 0x07);
		hashState512(state)->LastPart[LastByte] =
		    (hashState512(state)->LastPart[LastByte]
		    & (0xff << (PadOnePosition + 1))) ^
		    (0x01 << PadOnePosition);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		data64 = (uint64_t *)hashState512(state)->LastPart;

		if (state->unprocessed_bits < 960) {
			(void) memset((hashState512(state)->LastPart) +
			    LastByte + 1, 0x00,
			    EdonR512_BLOCK_SIZE - LastByte - 9);
			databitlen = EdonR512_BLOCK_SIZE * 8;
#if defined(MACHINE_IS_BIG_ENDIAN)
			st_swap64(num_bits, data64 + 15);
#else
			data64[15] = num_bits;
#endif
		} else {
			(void) memset((hashState512(state)->LastPart) +
			    LastByte + 1, 0x00,
			    EdonR512_BLOCK_SIZE * 2 - LastByte - 9);
			databitlen = EdonR512_BLOCK_SIZE * 16;
#if defined(MACHINE_IS_BIG_ENDIAN)
			st_swap64(num_bits, data64 + 31);
#else
			data64[31] = num_bits;
#endif
		}

		state->bits_processed += Q512(databitlen, data64,
		    hashState512(state)->DoublePipe);
		break;
	}

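	/*
	 * The digest is the tail end of the double pipe: 32-bit words 9..15
	 * (224) or 8..15 (256), and 64-bit words 10..15 (384) or 8..15 (512),
	 * byte-swapped on the way out on big-endian hosts.
	 */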
	switch (state->hashbitlen) {
	case 224: {
#if defined(MACHINE_IS_BIG_ENDIAN)
		/* LINTED: pointer cast may result in improper alignment */
		uint32_t *d32 = (uint32_t *)hashval;
		uint32_t *s32 = hashState224(state)->DoublePipe + 9;
		int j;

		for (j = 0; j < EdonR224_DIGEST_SIZE >> 2; j++)
			st_swap32(s32[j], d32 + j);
#else
		bcopy(hashState256(state)->DoublePipe + 9, hashval,
		    EdonR224_DIGEST_SIZE);
#endif
		break;
	}
	case 256: {
#if defined(MACHINE_IS_BIG_ENDIAN)
		/* LINTED: pointer cast may result in improper alignment */
		uint32_t *d32 = (uint32_t *)hashval;
		uint32_t *s32 = hashState224(state)->DoublePipe + 8;
		int j;

		for (j = 0; j < EdonR256_DIGEST_SIZE >> 2; j++)
			st_swap32(s32[j], d32 + j);
#else
		bcopy(hashState256(state)->DoublePipe + 8, hashval,
		    EdonR256_DIGEST_SIZE);
#endif
		break;
	}
	case 384: {
#if defined(MACHINE_IS_BIG_ENDIAN)
		/* LINTED: pointer cast may result in improper alignment */
		uint64_t *d64 = (uint64_t *)hashval;
		uint64_t *s64 = hashState384(state)->DoublePipe + 10;
		int j;

		for (j = 0; j < EdonR384_DIGEST_SIZE >> 3; j++)
			st_swap64(s64[j], d64 + j);
#else
		bcopy(hashState384(state)->DoublePipe + 10, hashval,
		    EdonR384_DIGEST_SIZE);
#endif
		break;
	}
	case 512: {
#if defined(MACHINE_IS_BIG_ENDIAN)
		/* LINTED: pointer cast may result in improper alignment */
		uint64_t *d64 = (uint64_t *)hashval;
		uint64_t *s64 = hashState512(state)->DoublePipe + 8;
		int j;

		for (j = 0; j < EdonR512_DIGEST_SIZE >> 3; j++)
			st_swap64(s64[j], d64 + j);
#else
		bcopy(hashState512(state)->DoublePipe + 8, hashval,
		    EdonR512_DIGEST_SIZE);
#endif
		break;
	}
	}
}


void
EdonRHash(size_t hashbitlen, const uint8_t *data, size_t databitlen,
    uint8_t *hashval)
{
	EdonRState state;

	EdonRInit(&state, hashbitlen);
	EdonRUpdate(&state, data, databitlen);
	EdonRFinal(&state, hashval);
}
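/*
 * Example usage (an illustrative sketch only; "buf" and "buflen" stand in
 * for a caller-supplied byte buffer and its length in bytes; the length
 * arguments to EdonRUpdate() and EdonRHash() are given in bits):
 *
 *	EdonRState ctx;
 *	uint8_t digest[EdonR512_DIGEST_SIZE];
 *
 *	EdonRInit(&ctx, 512);
 *	EdonRUpdate(&ctx, buf, buflen * 8);
 *	EdonRFinal(&ctx, digest);
 *
 * or, as a single call:
 *
 *	EdonRHash(512, buf, buflen * 8, digest);
 */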