Diffstat (limited to 'lib_generic')
-rw-r--r-- | lib_generic/Makefile          |   1
-rw-r--r-- | lib_generic/display_options.c |  36
-rw-r--r-- | lib_generic/lmb.c             | 152
-rw-r--r-- | lib_generic/md5.c             |   4
-rw-r--r-- | lib_generic/sha1.c            |   4
-rw-r--r-- | lib_generic/sha256.c          | 262
6 files changed, 394 insertions, 65 deletions
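The display_options.c hunk below extends print_size() to the GB range and widens its size argument from ulong to phys_size_t, so callers can pass physical sizes that no longer fit in 32 bits. A rough sketch of the intended behaviour (hypothetical calls; the printf tail of the function sits outside the hunk shown, so the exact spacing of the output may differ):

	print_size(0x00080000ULL, "\n");	/* below 1 MiB    -> "512 kB" */
	print_size(0x40000000ULL, "\n");	/* exactly 1 GiB  -> "1 GB"   */
	print_size(0x60000000ULL, "\n");	/* 1.5 GiB        -> "1.5 GB" */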
diff --git a/lib_generic/Makefile b/lib_generic/Makefile
index dca3a6c..4f6ce73 100644
--- a/lib_generic/Makefile
+++ b/lib_generic/Makefile
@@ -38,6 +38,7 @@ COBJS-y += lmb.o
 COBJS-y += ldiv.o
 COBJS-$(CONFIG_MD5) += md5.o
 COBJS-y += sha1.o
+COBJS-$(CONFIG_SHA256) += sha256.o
 COBJS-y += string.o
 COBJS-y += vsprintf.o
 COBJS-y += zlib.o
diff --git a/lib_generic/display_options.c b/lib_generic/display_options.c
index 5ddd94f..2dc2567 100644
--- a/lib_generic/display_options.c
+++ b/lib_generic/display_options.c
@@ -39,27 +39,35 @@ int display_options (void)
 }
 
 /*
- * print sizes as "xxx kB", "xxx.y kB", "xxx MB" or "xxx.y MB" as needed;
- * allow for optional trailing string (like "\n")
+ * print sizes as "xxx kB", "xxx.y kB", "xxx MB", "xxx.y MB",
+ * xxx GB, or xxx.y GB as needed; allow for optional trailing string
+ * (like "\n")
  */
-void print_size (ulong size, const char *s)
+void print_size (phys_size_t size, const char *s)
 {
-	ulong m, n;
-	ulong d = 1 << 20;		/* 1 MB */
-	char c = 'M';
-
-	if (size < d) {			/* print in kB */
-		c = 'k';
-		d = 1 << 10;
+	ulong m = 0, n;
+	phys_size_t d = 1 << 30;	/* 1 GB */
+	char c = 'G';
+
+	if (size < d) {			/* try MB */
+		c = 'M';
+		d = 1 << 20;
+		if (size < d) {		/* print in kB */
+			c = 'k';
+			d = 1 << 10;
+		}
 	}
 
 	n = size / d;
 
-	m = (10 * (size - (n * d)) + (d / 2) ) / d;
+	/* If there's a remainder, deal with it */
+	if(size % d) {
+		m = (10 * (size - (n * d)) + (d / 2) ) / d;
 
-	if (m >= 10) {
-		m -= 10;
-		n += 1;
+		if (m >= 10) {
+			m -= 10;
+			n += 1;
+		}
 	}
 
 	printf ("%2ld", n);
diff --git a/lib_generic/lmb.c b/lib_generic/lmb.c
index 3b8c805..6b46fa8 100644
--- a/lib_generic/lmb.c
+++ b/lib_generic/lmb.c
@@ -22,33 +22,34 @@ void lmb_dump_all(struct lmb *lmb)
 
 	debug("lmb_dump_all:\n");
 	debug("    memory.cnt = 0x%lx\n", lmb->memory.cnt);
-	debug("    memory.size = 0x%08x\n", lmb->memory.size);
+	debug("    memory.size = 0x%llx\n",
+			(unsigned long long)lmb->memory.size);
 	for (i=0; i < lmb->memory.cnt ;i++) {
-		debug("    memory.reg[0x%x].base = 0x%08x\n", i,
+		debug("    memory.reg[0x%x].base = 0x%llx\n", i,
 			lmb->memory.region[i].base);
-		debug("		.size = 0x%08x\n",
+		debug("		.size = 0x%llx\n",
 			lmb->memory.region[i].size);
 	}
 
 	debug("\n    reserved.cnt = 0x%lx\n", lmb->reserved.cnt);
-	debug("    reserved.size = 0x%08x\n", lmb->reserved.size);
+	debug("    reserved.size = 0x%llx\n", lmb->reserved.size);
 	for (i=0; i < lmb->reserved.cnt ;i++) {
-		debug("    reserved.reg[0x%x].base = 0x%08x\n", i,
+		debug("    reserved.reg[0x%x].base = 0x%llx\n", i,
 			lmb->reserved.region[i].base);
-		debug("		.size = 0x%08x\n",
+		debug("		.size = 0x%llx\n",
 			lmb->reserved.region[i].size);
 	}
 #endif /* DEBUG */
 }
 
-static unsigned long lmb_addrs_overlap(ulong base1,
-		ulong size1, ulong base2, ulong size2)
+static long lmb_addrs_overlap(phys_addr_t base1,
+		phys_size_t size1, phys_addr_t base2, phys_size_t size2)
 {
 	return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
 }
 
-static long lmb_addrs_adjacent(ulong base1, ulong size1,
-				ulong base2, ulong size2)
+static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
+				phys_addr_t base2, phys_size_t size2)
 {
 	if (base2 == base1 + size1)
 		return 1;
@@ -61,10 +62,10 @@ static long lmb_addrs_adjacent(ulong base1, ulong size1,
 static long lmb_regions_adjacent(struct lmb_region *rgn,
 				unsigned long r1, unsigned long r2)
 {
-	ulong base1 = rgn->region[r1].base;
-	ulong size1 = rgn->region[r1].size;
-	ulong base2 = rgn->region[r2].base;
-	ulong size2 = rgn->region[r2].size;
+	phys_addr_t base1 = rgn->region[r1].base;
+	phys_size_t size1 = rgn->region[r1].size;
+	phys_addr_t base2 = rgn->region[r2].base;
+	phys_size_t size2 = rgn->region[r2].size;
 
 	return lmb_addrs_adjacent(base1, size1, base2, size2);
 }
@@ -106,7 +107,7 @@ void lmb_init(struct lmb *lmb)
 }
 
 /* This routine called with relocation disabled. */
-static long lmb_add_region(struct lmb_region *rgn, ulong base, ulong size)
+static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t size)
 {
 	unsigned long coalesced = 0;
 	long adjacent, i;
@@ -119,8 +120,8 @@ static long lmb_add_region(struct lmb_region *rgn, ulong base, ulong size)
 
 	/* First try and coalesce this LMB with another. */
 	for (i=0; i < rgn->cnt; i++) {
-		ulong rgnbase = rgn->region[i].base;
-		ulong rgnsize = rgn->region[i].size;
+		phys_addr_t rgnbase = rgn->region[i].base;
+		phys_size_t rgnsize = rgn->region[i].size;
 
 		if ((rgnbase == base) && (rgnsize == size))
 			/* Already have this region, so we're done */
@@ -173,28 +174,77 @@ static long lmb_add_region(struct lmb_region *rgn, ulong base, ulong size)
 }
 
 /* This routine may be called with relocation disabled. */
-long lmb_add(struct lmb *lmb, ulong base, ulong size)
+long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
 {
 	struct lmb_region *_rgn = &(lmb->memory);
 
 	return lmb_add_region(_rgn, base, size);
 }
 
-long lmb_reserve(struct lmb *lmb, ulong base, ulong size)
+long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+{
+	struct lmb_region *rgn = &(lmb->reserved);
+	phys_addr_t rgnbegin, rgnend;
+	phys_addr_t end = base + size;
+	int i;
+
+	rgnbegin = rgnend = 0; /* supress gcc warnings */
+
+	/* Find the region where (base, size) belongs to */
+	for (i=0; i < rgn->cnt; i++) {
+		rgnbegin = rgn->region[i].base;
+		rgnend = rgnbegin + rgn->region[i].size;
+
+		if ((rgnbegin <= base) && (end <= rgnend))
+			break;
+	}
+
+	/* Didn't find the region */
+	if (i == rgn->cnt)
+		return -1;
+
+	/* Check to see if we are removing entire region */
+	if ((rgnbegin == base) && (rgnend == end)) {
+		lmb_remove_region(rgn, i);
+		return 0;
+	}
+
+	/* Check to see if region is matching at the front */
+	if (rgnbegin == base) {
+		rgn->region[i].base = end;
+		rgn->region[i].size -= size;
+		return 0;
+	}
+
+	/* Check to see if the region is matching at the end */
+	if (rgnend == end) {
+		rgn->region[i].size -= size;
+		return 0;
+	}
+
+	/*
+	 * We need to split the entry - adjust the current one to the
+	 * beginging of the hole and add the region after hole.
+	 */
+	rgn->region[i].size = base - rgn->region[i].base;
+	return lmb_add_region(rgn, end, rgnend - end);
+}
+
+long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
 {
 	struct lmb_region *_rgn = &(lmb->reserved);
 
 	return lmb_add_region(_rgn, base, size);
 }
 
-long lmb_overlaps_region(struct lmb_region *rgn, ulong base,
-		ulong size)
+long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
+		phys_size_t size)
 {
 	unsigned long i;
 
 	for (i=0; i < rgn->cnt; i++) {
-		ulong rgnbase = rgn->region[i].base;
-		ulong rgnsize = rgn->region[i].size;
+		phys_addr_t rgnbase = rgn->region[i].base;
+		phys_size_t rgnsize = rgn->region[i].size;
 		if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
 			break;
 		}
@@ -203,14 +253,14 @@ long lmb_overlaps_region(struct lmb_region *rgn, ulong base,
 	return (i < rgn->cnt) ? i : -1;
 }
 
-ulong lmb_alloc(struct lmb *lmb, ulong size, ulong align)
+phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
 {
 	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
 }
 
-ulong lmb_alloc_base(struct lmb *lmb, ulong size, ulong align, ulong max_addr)
+phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
 {
-	ulong alloc;
+	phys_addr_t alloc;
 
 	alloc = __lmb_alloc_base(lmb, size, align, max_addr);
 
@@ -221,25 +271,28 @@ ulong lmb_alloc_base(struct lmb *lmb, ulong size, ulong align, ulong max_addr)
 	return alloc;
 }
 
-static ulong lmb_align_down(ulong addr, ulong size)
+static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
 {
 	return addr & ~(size - 1);
 }
 
-static ulong lmb_align_up(ulong addr, ulong size)
+static phys_addr_t lmb_align_up(phys_addr_t addr, ulong size)
 {
 	return (addr + (size - 1)) & ~(size - 1);
 }
 
-ulong __lmb_alloc_base(struct lmb *lmb, ulong size, ulong align, ulong max_addr)
+phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
 {
 	long i, j;
-	ulong base = 0;
+	phys_addr_t base = 0;
+	phys_addr_t res_base;
 
 	for (i = lmb->memory.cnt-1; i >= 0; i--) {
-		ulong lmbbase = lmb->memory.region[i].base;
-		ulong lmbsize = lmb->memory.region[i].size;
+		phys_addr_t lmbbase = lmb->memory.region[i].base;
+		phys_size_t lmbsize = lmb->memory.region[i].size;
 
+		if (lmbsize < size)
+			continue;
 		if (max_addr == LMB_ALLOC_ANYWHERE)
 			base = lmb_align_down(lmbbase + lmbsize - size, align);
 		else if (lmbbase < max_addr) {
@@ -248,30 +301,31 @@ ulong __lmb_alloc_base(struct lmb *lmb, ulong size, ulong align, ulong max_addr)
 		} else
 			continue;
 
-		while ((lmbbase <= base) &&
-			((j = lmb_overlaps_region(&(lmb->reserved), base, size)) >= 0) )
-			base = lmb_align_down(lmb->reserved.region[j].base - size,
-					align);
-
-		if ((base != 0) && (lmbbase <= base))
-			break;
+		while (base && lmbbase <= base) {
+			j = lmb_overlaps_region(&lmb->reserved, base, size);
+			if (j < 0) {
+				/* This area isn't reserved, take it */
+				if (lmb_add_region(&lmb->reserved, base,
+							lmb_align_up(size,
+								align)) < 0)
+					return 0;
+				return base;
+			}
+			res_base = lmb->reserved.region[j].base;
+			if (res_base < size)
+				break;
+			base = lmb_align_down(res_base - size, align);
+		}
 	}
-
-	if (i < 0)
-		return 0;
-
-	if (lmb_add_region(&(lmb->reserved), base, lmb_align_up(size, align)) < 0)
-		return 0;
-
-	return base;
+	return 0;
 }
 
-int lmb_is_reserved(struct lmb *lmb, ulong addr)
+int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
 {
 	int i;
 
 	for (i = 0; i < lmb->reserved.cnt; i++) {
-		ulong upper = lmb->reserved.region[i].base +
+		phys_addr_t upper = lmb->reserved.region[i].base +
 			lmb->reserved.region[i].size - 1;
 		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
 			return 1;
diff --git a/lib_generic/md5.c b/lib_generic/md5.c
index 78ef475..9150510 100644
--- a/lib_generic/md5.c
+++ b/lib_generic/md5.c
@@ -27,10 +27,12 @@
 
 #ifndef USE_HOSTCC
 #include <common.h>
+#include <linux/string.h>
+#else
+#include <string.h>
 #endif /* USE_HOSTCC */
 #include <watchdog.h>
 #include <linux/types.h>
-#include <linux/string.h>
 #include <u-boot/md5.h>
 
 static void
diff --git a/lib_generic/sha1.c b/lib_generic/sha1.c
index c8ef4d2..da5bc16 100644
--- a/lib_generic/sha1.c
+++ b/lib_generic/sha1.c
@@ -31,9 +31,11 @@
 
 #ifndef USE_HOSTCC
 #include <common.h>
+#include <linux/string.h>
+#else
+#include <string.h>
 #endif /* USE_HOSTCC */
 #include <watchdog.h>
-#include <linux/string.h>
 #include "sha1.h"
 
 /*
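The lmb.c hunks above add lmb_free(), which releases all or part of a region previously reserved with lmb_reserve(). A rough usage sketch (the lmb instance, addresses, and sizes here are invented for illustration):

	struct lmb lmb;

	lmb_init(&lmb);
	lmb_add(&lmb, 0x00000000, 0x10000000);		/* 256 MiB of RAM */
	lmb_reserve(&lmb, 0x01000000, 0x00400000);	/* reserve 4 MiB  */

	/*
	 * Freeing 1 MiB out of the middle splits the reservation in two;
	 * a negative return means the range was not inside a single
	 * reserved region.
	 */
	if (lmb_free(&lmb, 0x01100000, 0x00100000) < 0)
		puts("lmb_free failed\n");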
diff --git a/lib_generic/sha256.c b/lib_generic/sha256.c
new file mode 100644
index 0000000..deb63a4
--- /dev/null
+++ b/lib_generic/sha256.c
@@ -0,0 +1,262 @@
+/*
+ * FIPS-180-2 compliant SHA-256 implementation
+ *
+ * Copyright (C) 2001-2003 Christophe Devine
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef USE_HOSTCC
+#include <common.h>
+#endif /* USE_HOSTCC */
+#include <watchdog.h>
+#include <linux/string.h>
+#include <sha256.h>
+
+/*
+ * 32-bit integer manipulation macros (big endian)
+ */
+#ifndef GET_UINT32_BE
+#define GET_UINT32_BE(n,b,i) {				\
+	(n) = ( (unsigned long) (b)[(i)    ] << 24 )	\
+	    | ( (unsigned long) (b)[(i) + 1] << 16 )	\
+	    | ( (unsigned long) (b)[(i) + 2] <<  8 )	\
+	    | ( (unsigned long) (b)[(i) + 3]       );	\
+}
+#endif
+#ifndef PUT_UINT32_BE
+#define PUT_UINT32_BE(n,b,i) {				\
+	(b)[(i)    ] = (unsigned char) ( (n) >> 24 );	\
+	(b)[(i) + 1] = (unsigned char) ( (n) >> 16 );	\
+	(b)[(i) + 2] = (unsigned char) ( (n) >>  8 );	\
+	(b)[(i) + 3] = (unsigned char) ( (n)       );	\
+}
+#endif
+
+void sha256_starts(sha256_context * ctx)
+{
+	ctx->total[0] = 0;
+	ctx->total[1] = 0;
+
+	ctx->state[0] = 0x6A09E667;
+	ctx->state[1] = 0xBB67AE85;
+	ctx->state[2] = 0x3C6EF372;
+	ctx->state[3] = 0xA54FF53A;
+	ctx->state[4] = 0x510E527F;
+	ctx->state[5] = 0x9B05688C;
+	ctx->state[6] = 0x1F83D9AB;
+	ctx->state[7] = 0x5BE0CD19;
+}
+
+void sha256_process(sha256_context * ctx, uint8_t data[64])
+{
+	uint32_t temp1, temp2;
+	uint32_t W[64];
+	uint32_t A, B, C, D, E, F, G, H;
+
+	GET_UINT32_BE(W[0], data, 0);
+	GET_UINT32_BE(W[1], data, 4);
+	GET_UINT32_BE(W[2], data, 8);
+	GET_UINT32_BE(W[3], data, 12);
+	GET_UINT32_BE(W[4], data, 16);
+	GET_UINT32_BE(W[5], data, 20);
+	GET_UINT32_BE(W[6], data, 24);
+	GET_UINT32_BE(W[7], data, 28);
+	GET_UINT32_BE(W[8], data, 32);
+	GET_UINT32_BE(W[9], data, 36);
+	GET_UINT32_BE(W[10], data, 40);
+	GET_UINT32_BE(W[11], data, 44);
+	GET_UINT32_BE(W[12], data, 48);
+	GET_UINT32_BE(W[13], data, 52);
+	GET_UINT32_BE(W[14], data, 56);
+	GET_UINT32_BE(W[15], data, 60);
+
+#define SHR(x,n) ((x & 0xFFFFFFFF) >> n)
+#define ROTR(x,n) (SHR(x,n) | (x << (32 - n)))
+
+#define S0(x) (ROTR(x, 7) ^ ROTR(x,18) ^ SHR(x, 3))
+#define S1(x) (ROTR(x,17) ^ ROTR(x,19) ^ SHR(x,10))
+
+#define S2(x) (ROTR(x, 2) ^ ROTR(x,13) ^ ROTR(x,22))
+#define S3(x) (ROTR(x, 6) ^ ROTR(x,11) ^ ROTR(x,25))
+
+#define F0(x,y,z) ((x & y) | (z & (x | y)))
+#define F1(x,y,z) (z ^ (x & (y ^ z)))
+
+#define R(t)					\
+(						\
+	W[t] = S1(W[t - 2]) + W[t - 7] +	\
+	       S0(W[t - 15]) + W[t - 16]	\
+)
+
+#define P(a,b,c,d,e,f,g,h,x,K) {		\
+	temp1 = h + S3(e) + F1(e,f,g) + K + x;	\
+	temp2 = S2(a) + F0(a,b,c);		\
+	d += temp1; h = temp1 + temp2;		\
+}
+
+	A = ctx->state[0];
+	B = ctx->state[1];
+	C = ctx->state[2];
+	D = ctx->state[3];
+	E = ctx->state[4];
+	F = ctx->state[5];
+	G = ctx->state[6];
+	H = ctx->state[7];
+
+	P(A, B, C, D, E, F, G, H, W[0], 0x428A2F98);
+	P(H, A, B, C, D, E, F, G, W[1], 0x71374491);
+	P(G, H, A, B, C, D, E, F, W[2], 0xB5C0FBCF);
+	P(F, G, H, A, B, C, D, E, W[3], 0xE9B5DBA5);
+	P(E, F, G, H, A, B, C, D, W[4], 0x3956C25B);
+	P(D, E, F, G, H, A, B, C, W[5], 0x59F111F1);
+	P(C, D, E, F, G, H, A, B, W[6], 0x923F82A4);
+	P(B, C, D, E, F, G, H, A, W[7], 0xAB1C5ED5);
+	P(A, B, C, D, E, F, G, H, W[8], 0xD807AA98);
+	P(H, A, B, C, D, E, F, G, W[9], 0x12835B01);
+	P(G, H, A, B, C, D, E, F, W[10], 0x243185BE);
+	P(F, G, H, A, B, C, D, E, W[11], 0x550C7DC3);
+	P(E, F, G, H, A, B, C, D, W[12], 0x72BE5D74);
+	P(D, E, F, G, H, A, B, C, W[13], 0x80DEB1FE);
+	P(C, D, E, F, G, H, A, B, W[14], 0x9BDC06A7);
+	P(B, C, D, E, F, G, H, A, W[15], 0xC19BF174);
+	P(A, B, C, D, E, F, G, H, R(16), 0xE49B69C1);
+	P(H, A, B, C, D, E, F, G, R(17), 0xEFBE4786);
+	P(G, H, A, B, C, D, E, F, R(18), 0x0FC19DC6);
+	P(F, G, H, A, B, C, D, E, R(19), 0x240CA1CC);
+	P(E, F, G, H, A, B, C, D, R(20), 0x2DE92C6F);
+	P(D, E, F, G, H, A, B, C, R(21), 0x4A7484AA);
+	P(C, D, E, F, G, H, A, B, R(22), 0x5CB0A9DC);
+	P(B, C, D, E, F, G, H, A, R(23), 0x76F988DA);
+	P(A, B, C, D, E, F, G, H, R(24), 0x983E5152);
+	P(H, A, B, C, D, E, F, G, R(25), 0xA831C66D);
+	P(G, H, A, B, C, D, E, F, R(26), 0xB00327C8);
+	P(F, G, H, A, B, C, D, E, R(27), 0xBF597FC7);
+	P(E, F, G, H, A, B, C, D, R(28), 0xC6E00BF3);
+	P(D, E, F, G, H, A, B, C, R(29), 0xD5A79147);
+	P(C, D, E, F, G, H, A, B, R(30), 0x06CA6351);
+	P(B, C, D, E, F, G, H, A, R(31), 0x14292967);
+	P(A, B, C, D, E, F, G, H, R(32), 0x27B70A85);
+	P(H, A, B, C, D, E, F, G, R(33), 0x2E1B2138);
+	P(G, H, A, B, C, D, E, F, R(34), 0x4D2C6DFC);
+	P(F, G, H, A, B, C, D, E, R(35), 0x53380D13);
+	P(E, F, G, H, A, B, C, D, R(36), 0x650A7354);
+	P(D, E, F, G, H, A, B, C, R(37), 0x766A0ABB);
+	P(C, D, E, F, G, H, A, B, R(38), 0x81C2C92E);
+	P(B, C, D, E, F, G, H, A, R(39), 0x92722C85);
+	P(A, B, C, D, E, F, G, H, R(40), 0xA2BFE8A1);
+	P(H, A, B, C, D, E, F, G, R(41), 0xA81A664B);
+	P(G, H, A, B, C, D, E, F, R(42), 0xC24B8B70);
+	P(F, G, H, A, B, C, D, E, R(43), 0xC76C51A3);
+	P(E, F, G, H, A, B, C, D, R(44), 0xD192E819);
+	P(D, E, F, G, H, A, B, C, R(45), 0xD6990624);
+	P(C, D, E, F, G, H, A, B, R(46), 0xF40E3585);
+	P(B, C, D, E, F, G, H, A, R(47), 0x106AA070);
+	P(A, B, C, D, E, F, G, H, R(48), 0x19A4C116);
+	P(H, A, B, C, D, E, F, G, R(49), 0x1E376C08);
+	P(G, H, A, B, C, D, E, F, R(50), 0x2748774C);
+	P(F, G, H, A, B, C, D, E, R(51), 0x34B0BCB5);
+	P(E, F, G, H, A, B, C, D, R(52), 0x391C0CB3);
+	P(D, E, F, G, H, A, B, C, R(53), 0x4ED8AA4A);
+	P(C, D, E, F, G, H, A, B, R(54), 0x5B9CCA4F);
+	P(B, C, D, E, F, G, H, A, R(55), 0x682E6FF3);
+	P(A, B, C, D, E, F, G, H, R(56), 0x748F82EE);
+	P(H, A, B, C, D, E, F, G, R(57), 0x78A5636F);
+	P(G, H, A, B, C, D, E, F, R(58), 0x84C87814);
+	P(F, G, H, A, B, C, D, E, R(59), 0x8CC70208);
+	P(E, F, G, H, A, B, C, D, R(60), 0x90BEFFFA);
+	P(D, E, F, G, H, A, B, C, R(61), 0xA4506CEB);
+	P(C, D, E, F, G, H, A, B, R(62), 0xBEF9A3F7);
+	P(B, C, D, E, F, G, H, A, R(63), 0xC67178F2);
+
+	ctx->state[0] += A;
+	ctx->state[1] += B;
+	ctx->state[2] += C;
+	ctx->state[3] += D;
+	ctx->state[4] += E;
+	ctx->state[5] += F;
+	ctx->state[6] += G;
+	ctx->state[7] += H;
+}
+
+void sha256_update(sha256_context * ctx, uint8_t * input, uint32_t length)
+{
+	uint32_t left, fill;
+
+	if (!length)
+		return;
+
+	left = ctx->total[0] & 0x3F;
+	fill = 64 - left;
+
+	ctx->total[0] += length;
+	ctx->total[0] &= 0xFFFFFFFF;
+
+	if (ctx->total[0] < length)
+		ctx->total[1]++;
+
+	if (left && length >= fill) {
+		memcpy((void *) (ctx->buffer + left), (void *) input, fill);
+		sha256_process(ctx, ctx->buffer);
+		length -= fill;
+		input += fill;
+		left = 0;
+	}
+
+	while (length >= 64) {
+		sha256_process(ctx, input);
+		length -= 64;
+		input += 64;
+	}
+
+	if (length)
+		memcpy((void *) (ctx->buffer + left), (void *) input, length);
+}
+
+static uint8_t sha256_padding[64] = {
+	0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+void sha256_finish(sha256_context * ctx, uint8_t digest[32])
+{
+	uint32_t last, padn;
+	uint32_t high, low;
+	uint8_t msglen[8];
+
+	high = ((ctx->total[0] >> 29)
+		| (ctx->total[1] << 3));
+	low = (ctx->total[0] << 3);
+
+	PUT_UINT32_BE(high, msglen, 0);
+	PUT_UINT32_BE(low, msglen, 4);
+
+	last = ctx->total[0] & 0x3F;
+	padn = (last < 56) ? (56 - last) : (120 - last);
+
+	sha256_update(ctx, sha256_padding, padn);
+	sha256_update(ctx, msglen, 8);
+
+	PUT_UINT32_BE(ctx->state[0], digest, 0);
+	PUT_UINT32_BE(ctx->state[1], digest, 4);
+	PUT_UINT32_BE(ctx->state[2], digest, 8);
+	PUT_UINT32_BE(ctx->state[3], digest, 12);
+	PUT_UINT32_BE(ctx->state[4], digest, 16);
+	PUT_UINT32_BE(ctx->state[5], digest, 20);
+	PUT_UINT32_BE(ctx->state[6], digest, 24);
+	PUT_UINT32_BE(ctx->state[7], digest, 28);
+}
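The new lib_generic/sha256.c is compiled only when CONFIG_SHA256 is defined (see the Makefile hunk at the top of this diff). A minimal sketch of how the three exported calls chain together, assuming <sha256.h> declares sha256_context and the prototypes exactly as defined above; the helper and buffer names are invented:

	#include <sha256.h>

	static void hash_example(void *buf, uint32_t len, uint8_t digest[32])
	{
		sha256_context ctx;

		sha256_starts(&ctx);
		/* sha256_update() may be called repeatedly to stream data */
		sha256_update(&ctx, (uint8_t *)buf, len);
		/* writes the 32-byte big-endian digest */
		sha256_finish(&ctx, digest);
	}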