diff options
author | sternenseemann <0rpkxez4ksa01gb3typccl0i@systemli.org> | 2021-04-19 00:24:41 +0200 |
---|---|---|
committer | sterni <sternenseemann@systemli.org> | 2021-06-16 22:28:25 +0200 |
commit | f83032b647d678cd88259987d5a62ddc1f9c3247 (patch) | |
tree | b29e6efefbffd750d68ab08fc79bd4728f9d0ae3 | |
parent | 922f7d2a4ea81a08d0bafe2265c57851eb9aceb3 (diff) |
feat(warteraum): read tokens and salt from files
Instead of compiling salt and tokens in, read both from files that are specified as environment variables WARTERAUM_SALT_FILE and WARTERAUM_TOKENS_FILE. hashtoken has also been adjusted to read the salt from a specified salt file (the first argument). It also now outputs the token in raw form instead of C syntax. Intended usage is `hashtoken /path/to/salt token >> /path/to/tokens`. Disadvantage of this is that deleting tokens is somewhat cumbersome and only really doable with a hex editor. The NixOS service now expects saltFile and tokensFile instead of salt and tokens as a string and a list of strings respectively.
-rw-r--r-- | .gitignore | 1 | ||||
-rw-r--r-- | README.adoc | 48 | ||||
-rw-r--r-- | default.nix | 9 | ||||
-rw-r--r-- | nix/warteraum.nix | 45 | ||||
-rw-r--r-- | nixos/flipdot-gschichtler.nix | 40 | ||||
-rw-r--r-- | warteraum/GNUmakefile | 14 | ||||
-rw-r--r-- | warteraum/auth.c | 69 | ||||
-rw-r--r-- | warteraum/auth.h | 16 | ||||
-rw-r--r-- | warteraum/hashtoken.c | 37 | ||||
-rw-r--r-- | warteraum/http_string.c | 64 | ||||
-rw-r--r-- | warteraum/http_string.h | 9 | ||||
-rw-r--r-- | warteraum/main.c | 6 | ||||
-rwxr-xr-x | warteraum/test/integration | 13 | ||||
-rwxr-xr-x | warteraum/test/test_integration.py | 8 | ||||
-rw-r--r-- | warteraum/tokens.h | 6 |
15 files changed, 254 insertions, 131 deletions
diff --git a/.gitignore b/.gitignore index 606083f..e4d42bb 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ vgcore.* /warteraum/warteraum /warteraum/hashtoken /warteraum/test/*.test +/warteraum/test/*.tmp /warteraum/test/valgrind-log.txt /bahnhofshalle/node_modules diff --git a/README.adoc b/README.adoc index ee46939..e13439e 100644 --- a/README.adoc +++ b/README.adoc @@ -271,12 +271,8 @@ ready to be installed: * `bahnhofshalle` * `anzeigetafel` -The warteraum attributes can be overriden to set the following -values: - -* `apiTokens`: A list of api tokens to allow to authenticate with -* `scryptSalt`: A string of hexadecimal digits which make up the - salt to use when hashing api tokens. +Configuration +~~~~~~~~~~~~~ `nixos/flipdot-gschichtler.nix` provides a NixOS module which defines `services.flipdot-gschichtler` to conveniently set up @@ -302,8 +298,8 @@ in { services.flipdot-gschichtler = { enable = true; virtualHost = "flipdot.openlab-augsburg.de"; - apiTokens = [ ... ]; - salt = "..."; + tokensFile = "/var/secrets/flipdot-gschichtler/tokens"; + saltFile = "/var/secrets/flipdot-gschichtler/salt"; }; services.nginx.enable = true; @@ -313,9 +309,45 @@ in { } --------------- +warteraum +^^^^^^^^^ + +`warteraum` is configured via environment variables (which the NixOS +module utilizes): + +* `WARTERAUM_SALT_FILE`: A file containing random data to use as salt +* `WARTERAUM_TOKENS_FILE`: API tokens hashed using `scrypt` + +To generate the tokens file, `warteraum` ships a utility tool. +Setting up auth works like this: + +------------------- +$ head -c 512 /dev/urandom > $WARTERAUM_SALT_FILE +$ hashtoken $WARTERAUM_SALT_FILE token1 >> $WARTERAUM_TOKENS_FILE +$ hashtoken $WARTERAUM_SALT_FILE token2 >> $WARTERAUM_TOKENS_FILE +------------------- + +Now `warteraum` would accept “token1” and “token2” when authenticating. +Note that `hashtoken` only supports appending tokens in a +convenient fashion at the moment. 
Removing tokens is quite cumbersome +and only possible with a knowledge of `warteraum` internals. + Changelog --------- +2.1.0 (WIP) +~~~~~~~~~~~ + +* `warteraum` +** Limit size of request bodies to prevent DoS attacks +** Trim whitespace on input text +** Instead of compiling in salt and tokens, read them from the + files specified via the `WARTERAUM_SALT_FILE` and + `WARTERAUM_TOKENS_FILE` environment variables. In the NixOS + service this is reflected by the usage of `saltFile` and + `tokensFile` respectively over the previous `salt` and + `tokens`. + 2.0.0 ~~~~~ diff --git a/default.nix b/default.nix index dd12d14..4021a7b 100644 --- a/default.nix +++ b/default.nix @@ -1,7 +1,4 @@ -{ pkgs ? (import ./nix/nixpkgs-pinned.nix { }) -, scryptSalt ? null -, apiTokens ? null -}: +{ pkgs ? (import ./nix/nixpkgs-pinned.nix { }) }: let gi = pkgs.nix-gitignore; @@ -22,13 +19,13 @@ in rec { warteraum-static = pkgs.pkgsStatic.callPackage ./nix/warteraum.nix { inherit (pkgs.pkgsStatic.llvmPackages) stdenv; - inherit scryptSalt apiTokens rootSrc sourceName; + inherit rootSrc sourceName; inherit (python3.pkgs) pytest pytest-randomly requests flipdot-gschichtler; }; warteraum = pkgs.callPackage ./nix/warteraum.nix { inherit (pkgs.llvmPackages_latest) stdenv; - inherit scryptSalt apiTokens rootSrc sourceName; + inherit rootSrc sourceName; inherit (python3.pkgs) pytest pytest-randomly requests flipdot-gschichtler; }; diff --git a/nix/warteraum.nix b/nix/warteraum.nix index f935165..af954a7 100644 --- a/nix/warteraum.nix +++ b/nix/warteraum.nix @@ -1,41 +1,8 @@ { stdenv, lib, scrypt , jq, requests, pytest, pytest-randomly, flipdot-gschichtler, valgrind -, scryptSalt ? null, apiTokens ? 
null , rootSrc, sourceName }: -let - stringSegments = n: s: - let - stringSplitter = i: - builtins.substring (i * 2) n s; - nonempty = s: builtins.stringLength s != 0; - in - builtins.filter nonempty (builtins.genList - stringSplitter ((builtins.stringLength s / n) + 1)); - - saltBytes = stringSegments 2 scryptSalt; - saltArray = - let - commas = builtins.foldl' (a: b: a + ", 0x" + b) "" saltBytes; - in builtins.substring 1 (builtins.stringLength commas) commas; - - saltReplace = lib.optionalString (scryptSalt != null) '' - sed -i '/^ 0x/d' auth.h - sed -i '/const uint8_t salt/a\${saltArray}' auth.h - ''; - - tokensReplace = lib.optionalString (apiTokens != null) '' - sed -i '/^ {/d' tokens.h - sed -i '/^};/d' tokens.h - make hashtoken - ${lib.concatMapStringsSep "\n" - (x: "./hashtoken ${x} >> tokens.h; echo -n ', ' >> tokens.h") apiTokens} - echo "};" >> tokens.h - ''; - -in - stdenv.mkDerivation rec { pname = "warteraum"; version = import ./version.nix; @@ -48,18 +15,8 @@ stdenv.mkDerivation rec { "PREFIX=${placeholder "out"}" ]; - #postUnpack = '' - # chmod -R u+w "$sourceRoot/.." - #''; - - patchPhase = '' - runHook prePatch - + postPatch = '' patchShebangs test/integration - ${saltReplace} - ${tokensReplace} - - runHook postPatch ''; doCheck = true; diff --git a/nixos/flipdot-gschichtler.nix b/nixos/flipdot-gschichtler.nix index 33373e9..f94fe01 100644 --- a/nixos/flipdot-gschichtler.nix +++ b/nixos/flipdot-gschichtler.nix @@ -4,11 +4,10 @@ with lib; let cfg = config.services.flipdot-gschichtler; - fg = flipdot-gschichtler; - withTokens = fg.warteraum-static.override { - inherit (cfg) apiTokens; - scryptSalt = cfg.salt; - }; + inherit (flipdot-gschichtler) + bahnhofshalle + warteraum-static + ; in { options = { services.flipdot-gschichtler = { @@ -23,21 +22,23 @@ in { ''; }; - salt = mkOption { + saltFile = mkOption { type = types.str; description = '' - Salt to use for hashing API tokens using scrypt_kdf(3). 
- Must be a string of hexadecimals which has a multiple of - 2 as a length. + File of random data to use as salt for storing + API tokens. Using a path here will copy secrets + into the nix store! ''; }; - apiTokens = mkOption { - type = types.listOf types.str; - default = []; + tokensFile = mkOption { + type = types.path; description = '' - List of API tokens to allow access. - May be strings of any length. + File containing authorized API tokens which + can be created using + <literal>''${warteraum}/bin/hashtoken</literal>. + Using a path here will copy secrets into the + nix store! ''; }; }; @@ -49,12 +50,17 @@ in { after = [ "network.target" ]; wantedBy = [ "multi-user.target" ]; + environment = { + WARTERAUM_SALT_FILE = cfg.saltFile; + WARTERAUM_TOKENS_FILE = cfg.tokensFile; + }; + serviceConfig = { Type = "simple"; - ExecStart = "${withTokens}/bin/warteraum"; + ExecStart = "${warteraum-static}/bin/warteraum"; InAccessibleDirectories = "/"; # mmap and munmap are used by libscrypt-kdf - SystemCallFilter = "@default @basic-io @io-event @network-io fcntl @signal @process @timer brk mmap munmap"; + SystemCallFilter = "@default @basic-io @io-event @network-io fcntl @signal @process @timer brk mmap munmap open"; SystemCallArchitectures = "native"; CapabilityBoundingSet = ""; @@ -81,7 +87,7 @@ in { services.nginx.virtualHosts."${cfg.virtualHost}" = { enableACME = true; forceSSL = true; - root = fg.bahnhofshalle; + root = bahnhofshalle; extraConfig = '' location /api { proxy_pass http://127.0.0.1:9000/api; diff --git a/warteraum/GNUmakefile b/warteraum/GNUmakefile index 6ad1ce4..01b907e 100644 --- a/warteraum/GNUmakefile +++ b/warteraum/GNUmakefile @@ -20,10 +20,10 @@ TEST_BINS = test/emitjson.test test/queue.test test/form.test test/routing.test all: warteraum hashtoken -warteraum: emitjson.o queue.o routing.o form.o auth.o main.o +warteraum: http_string.o emitjson.o queue.o routing.o form.o auth.o main.o $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) -hashtoken: hashtoken.o 
+hashtoken: hashtoken.o http_string.o $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) main.o: main.c queue.h routing.h form.h v1_static.h emitjson.h \ @@ -33,9 +33,11 @@ form.o: form.c http_string.h $(HTTPSERVER) routing.o: routing.c $(HTTPSERVER) -hashtoken.o: hashtoken.c auth.h +hashtoken.o: hashtoken.c auth.h http_string.h $(HTTPSERVER) -auth.o: auth.h tokens.h http_string.h $(HTTPSERVER) +auth.o: auth.c auth.h http_string.h $(HTTPSERVER) + +http_string.o: http_string.c http_string.h $(HTTPSERVER) install: all install -Dm755 hashtoken -t $(BINDIR) @@ -45,14 +47,16 @@ clean: rm -f warteraum hashtoken rm -f *.o rm -f test/*.o + rm -f test/*.tmp rm -f $(TEST_BINS) + rm -rf test/__pycache__ test/%.o: http_string.h test/%.test: %.o test/test_%.o $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) -check: warteraum $(TEST_BINS) +check: warteraum hashtoken $(TEST_BINS) @echo == Running unit tests for t in $(TEST_BINS); do ./$$t; done diff --git a/warteraum/auth.c b/warteraum/auth.c index dcf0454..b0ccbbd 100644 --- a/warteraum/auth.c +++ b/warteraum/auth.c @@ -1,26 +1,77 @@ -#include "tokens.h" +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> + #include "auth.h" +#include "http_string.h" + +// buffer containing the currently used salt +static struct http_string_s salt; +// buffer containing _all_ hashed tokens. This works because +// they have a fixed length in their hashed form, so we can +// easily loop through them. 
+static struct http_string_s tokens; + +bool auth_init() { + http_string_clear(&salt); + http_string_clear(&tokens); + + char *salt_file = getenv("WARTERAUM_SALT_FILE"); + char *tokens_file = getenv("WARTERAUM_TOKENS_FILE"); + + if(salt_file != NULL && tokens_file != NULL) { + errno = 0; -#define HASH_TOKEN(token, size, output) \ - scrypt_kdf((const uint8_t *) token, size, salt, sizeof(salt), \ - SCRYPT_N, SCRYPT_r, SCRYPT_p, output, SCRYPT_OUTPUT_LEN) + salt = http_string_fread(salt_file); + if(errno != 0) { + perror("Error: Failed reading WARTERAUM_SALT_FILE"); + return false; + } + + tokens = http_string_fread(tokens_file); + if(errno != 0) { + perror("Error: Failed reading WARTERAUM_TOKENS_FILE"); + return false; + } + + return true; + } else { + fputs("Warning: Missing necessary file(s) to setup auth\n", stderr); + return false; + } +} + +void auth_cleanup() { + http_string_free(&salt); + http_string_free(&tokens); +} bool auth_verify(struct http_string_s token) { + if(HTTP_STRING_EMPTY(salt) || HTTP_STRING_EMPTY(tokens)) { + return false; + } + uint8_t hashed[SCRYPT_OUTPUT_LEN]; - int hash_result = HASH_TOKEN(token.buf, token.len, hashed); + int hash_result = HASH_TOKEN(salt, token, hashed); if(hash_result != 0) { return false; } bool token_matches = false; - size_t token_count = sizeof(tokens) / (sizeof(uint8_t) * SCRYPT_OUTPUT_LEN); + int token_count = tokens.len / SCRYPT_OUTPUT_LEN; - for(size_t i = 0; i < token_count && !token_matches; i++) { + if(token_count * SCRYPT_OUTPUT_LEN != tokens.len) + fputs("Warning: length of tokens file is not a whole multiple of SCRYPT_OUTPUT_LEN\n", stderr); + + for(int i = 0; i < token_count && !token_matches; i++) { token_matches = true; - for(size_t j = 0; j < SCRYPT_OUTPUT_LEN && token_matches; j++) { - token_matches = tokens[i][j] == hashed[j]; + const char *expected = tokens.buf + (i * SCRYPT_OUTPUT_LEN); + + // hopefully constant time equality + for(int j = 0; j < SCRYPT_OUTPUT_LEN; j++) { + token_matches &= 
hashed[j] == (uint8_t) expected[j]; } } diff --git a/warteraum/auth.h b/warteraum/auth.h index 8379694..0a9f56c 100644 --- a/warteraum/auth.h +++ b/warteraum/auth.h @@ -13,18 +13,14 @@ #define SCRYPT_r 8 #define SCRYPT_p 1 -// FIXME change for production -static const uint8_t salt[] = { - 0x56, 0x02, 0xe9, 0xda, 0x68, 0x60, 0xfb, 0x20, 0xde, 0xa2, 0x6c, 0x9d, 0x68, 0xb4, 0x48, 0x28, - 0x42, 0x83, 0x38, 0xff, 0x5b, 0x5a, 0xb3, 0x87, 0x90, 0x8d, 0xff, 0xb5, 0x7e, 0x3c, 0x37, 0x2b, - 0x9b, 0x40, 0x18, 0x70, 0x94, 0x18, 0x86, 0x91, 0x9d, 0xa9, 0xda, 0x2e, 0x36, 0xdc, 0xd3, 0x56, - 0x1d, 0x9b, 0xd1, 0xa0, 0xce, 0xcd, 0x86, 0xe7, 0xac, 0x7c, 0xfa, 0xd1, 0x46, 0xa3, 0x56, 0x51, -}; - -#define HASH_TOKEN(token, size, output) \ - scrypt_kdf((const uint8_t *) token, size, salt, sizeof(salt), \ +#define HASH_TOKEN(salt, tok, output) \ + scrypt_kdf((const uint8_t *) token.buf, token.len, \ + (const uint8_t *) salt.buf, salt.len, \ SCRYPT_N, SCRYPT_r, SCRYPT_p, output, SCRYPT_OUTPUT_LEN) +bool auth_init(); +void auth_cleanup(); + bool auth_verify(struct http_string_s token); #endif diff --git a/warteraum/hashtoken.c b/warteraum/hashtoken.c index 24ac225..3d8dd5c 100644 --- a/warteraum/hashtoken.c +++ b/warteraum/hashtoken.c @@ -1,36 +1,41 @@ +#include <errno.h> #include <stdio.h> #include <stdint.h> #include <string.h> + +#include "http_string.h" #include "auth.h" int main(int argc, char **argv) { - if(argc != 2) { + if(argc != 3) { fputs("Usage: ", stderr); fputs(argv[0], stderr); - fputs(" TOKEN_STRING", stderr); + fputs(" SALT_FILE TOKEN_STRING", stderr); return 1; } - const char *t = argv[1]; - size_t t_len = strlen(t); + errno = 0; - uint8_t output[SCRYPT_OUTPUT_LEN]; + struct http_string_s salt = http_string_fread(argv[1]); - int res = HASH_TOKEN(t, t_len, output); + if(errno != 0) { + perror("Couldn't read the salt file"); + return 1; + } - if(res == 0) { - putchar('{'); - for(size_t i = 0; i < SCRYPT_OUTPUT_LEN; i++) { - printf(" 0x%x", output[i]); + struct 
http_string_s token; - if(SCRYPT_OUTPUT_LEN != i + 1) { - fputs(",", stdout); - } - } + token.buf = argv[2]; + token.len = strlen(token.buf); - puts(" }"); + uint8_t output[SCRYPT_OUTPUT_LEN]; + + int res = HASH_TOKEN(salt, token, output); + + if(res == 0) { + size_t written = fwrite(output, sizeof(uint8_t), SCRYPT_OUTPUT_LEN, stdout); - return 0; + return !(written == SCRYPT_OUTPUT_LEN); } else { return 1; } diff --git a/warteraum/http_string.c b/warteraum/http_string.c new file mode 100644 index 0000000..31552db --- /dev/null +++ b/warteraum/http_string.c @@ -0,0 +1,64 @@ +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> + +#include "http_string.h" + +#define BASE_BUF_SIZE 1024 + +void http_string_clear(struct http_string_s *s) { + s->buf = NULL; + s->len = 0; +} + +void http_string_free(struct http_string_s *s) { + if(s->buf != NULL) + free((void *) s->buf); + + http_string_clear(s); +} + +struct http_string_s http_string_fread(char *path) { + struct http_string_s out; + http_string_clear(&out); + + FILE *f = fopen(path, "r"); + + if(f == NULL) + return out; + + char *buf = NULL; + size_t cap = BASE_BUF_SIZE; + size_t pos = 0; + size_t read = 0; + + do { + char *tmp = realloc(buf, cap); + + if(tmp == NULL) { + if(buf != NULL) { + fclose(f); + free(buf); + } + + errno = ENOMEM; + return out; + } + + buf = tmp; + + read = fread(buf + pos, sizeof(char), cap - pos, f); + pos += read; + } while(read > 0 && !(feof(f) || ferror(f))); + + if(!ferror(f)) { + out.len = pos; + out.buf = buf; + } else { + free(buf); + } + + fclose(f); + + return out; +} diff --git a/warteraum/http_string.h b/warteraum/http_string.h index ee47aa6..ebdfc84 100644 --- a/warteraum/http_string.h +++ b/warteraum/http_string.h @@ -2,6 +2,7 @@ #define WARTERAUM_HTTP_STRING_H #include <string.h> +#include "../third_party/httpserver.h/httpserver.h" #define STATIC_HTTP_STRING(s) \ { s, sizeof(s) - 1 } @@ -12,4 +13,12 @@ #define HTTP_STRING_IS(a, s) \ (a.len == sizeof(s) - 1 && 
strncmp(a.buf, s, a.len) == 0) +#define HTTP_STRING_EMPTY(s) (s.len <= 0 || s.buf == NULL) + +void http_string_clear(struct http_string_s *s); + +void http_string_free(struct http_string_s *s); + +struct http_string_s http_string_fread(char *path); + #endif diff --git a/warteraum/main.c b/warteraum/main.c index d4be5df..0cca576 100644 --- a/warteraum/main.c +++ b/warteraum/main.c @@ -43,6 +43,8 @@ void cleanup(int signum) { if(signum == SIGTERM || signum == SIGINT) { queue_free(flip_queue); free(server); + auth_cleanup(); + exit(EXIT_SUCCESS); } } @@ -447,6 +449,10 @@ void handle_request(http_request_t *request) { int main(void) { queue_new(&flip_queue); + if(!auth_init()) { + fputs("Warning: Couldn't setup auth, won't accept any credentials\n", stderr); + } + signal(SIGTERM, cleanup); signal(SIGINT, cleanup); diff --git a/warteraum/test/integration b/warteraum/test/integration index a24ca3f..7329982 100755 --- a/warteraum/test/integration +++ b/warteraum/test/integration @@ -4,7 +4,16 @@ set -e cd "$(dirname "$0")" if command -v pytest > /dev/null; then - rm -f valgrind-log.txt + export WARTERAUM_SALT_FILE=./salt.tmp + export WARTERAUM_TOKENS_FILE=./tokens.tmp + + rm -f valgrind-log.txt "$WARTERAUM_SALT_FILE" "$WARTERAUM_TOKENS_FILE" + + head -c 100 /dev/urandom > "$WARTERAUM_SALT_FILE" + + ../hashtoken "$WARTERAUM_SALT_FILE" random > "$WARTERAUM_TOKENS_FILE" + ../hashtoken "$WARTERAUM_SALT_FILE" hannes >> "$WARTERAUM_TOKENS_FILE" + ../hashtoken "$WARTERAUM_SALT_FILE" lol123 >> "$WARTERAUM_TOKENS_FILE" valgrind \ --log-file=./valgrind-log.txt \ @@ -17,7 +26,7 @@ if command -v pytest > /dev/null; then sleep 3 - pytest ./test_integration.py + pytest -v ./test_integration.py kill $pid diff --git a/warteraum/test/test_integration.py b/warteraum/test/test_integration.py index 0eabf7a..642b9d5 100755 --- a/warteraum/test/test_integration.py +++ b/warteraum/test/test_integration.py @@ -5,9 +5,6 @@ import sys BASE_URL = 'http://localhost:9000' TOKEN = 'hannes' - -# 
technically somebody could use these tokens, -# but tbh they deserve this test to fail then WRONG_TOKENS = [ 'password', 'admin', '12345678' ] MAX_TEXT_LEN = 512 @@ -38,11 +35,6 @@ def test_queue_del_format(): r = requests.delete('{}/api/v2/queue/{}'.format(BASE_URL, my_id), data = { 'token' : TOKEN }) - # accept unauthorized (we can't know the token if a custom one was set - # via the nix derivation arguments) - if r.status_code == 401: - pytest.xfail('invalid API token is configured') - assert r.status_code == 204 def test_queue_404_format(): diff --git a/warteraum/tokens.h b/warteraum/tokens.h deleted file mode 100644 index 37da0a5..0000000 --- a/warteraum/tokens.h +++ /dev/null @@ -1,6 +0,0 @@ -#include "auth.h" - -const uint8_t tokens[][SCRYPT_OUTPUT_LEN] = { - { 0x2f, 0x75, 0x87, 0xa2, 0xbe, 0x1, 0x0, 0xe7, 0x1, 0xee, 0x15, 0x71, 0xd6, 0xc3, 0xf3, 0x9b, 0x44, 0x31, 0xaa, 0x11, 0x8e, 0x38, 0xa7, 0x90, 0xf8, 0xcd, 0xfc, 0x9d, 0xed, 0x5, 0x82, 0x8e }, - { 0x87, 0x46, 0x7a, 0x87, 0x5c, 0x54, 0xf7, 0x11, 0xdb, 0x6, 0xba, 0xe2, 0x9c, 0x40, 0xe7, 0x57, 0x6a, 0x1a, 0xf6, 0x3b, 0x3e, 0x6, 0x79, 0x30, 0xfd, 0x1f, 0xc4, 0xe7, 0xad, 0x8f, 0x4, 0x88 } -}; |