From 2bba8f9f5a7ea8af619a0df0afb4f36f3e00013a Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Wed, 3 Apr 2024 06:58:25 +1100 Subject: [PATCH] secp256k1-sys: Vendor latest secp256k1 Vendor the latest secp256k1 `v0.4.1`. Bump the version number of `secp256k1-sys` to `v0.10.0` and run the vendor script. Also depend on the new version in `rust-secp256k1`, and add a changelog entry. --- Cargo-minimal.lock | 2 +- Cargo-recent.lock | 2 +- Cargo.toml | 2 +- secp256k1-sys/CHANGELOG.md | 5 +- secp256k1-sys/Cargo.toml | 4 +- .../depend/secp256k1-HEAD-revision.txt | 2 +- secp256k1-sys/depend/secp256k1/.cirrus.yml | 95 + .../actions/run-in-docker-action/action.yml | 6 +- .../depend/secp256k1/.github/workflows/ci.yml | 6 +- secp256k1-sys/depend/secp256k1/CHANGELOG.md | 11 +- secp256k1-sys/depend/secp256k1/CMakeLists.txt | 14 +- .../depend/secp256k1/CONTRIBUTING.md | 107 + secp256k1-sys/depend/secp256k1/Makefile.am | 36 +- secp256k1-sys/depend/secp256k1/README.md | 32 +- secp256k1-sys/depend/secp256k1/ci/ci.sh | 16 +- .../secp256k1/ci/linux-debian.Dockerfile | 10 +- .../cmake/GeneratePkgConfigFile.cmake | 8 + .../secp256k1/cmake/TryAppendCFlags.cmake | 4 +- secp256k1-sys/depend/secp256k1/configure.ac | 18 +- .../secp256k1/contrib/lax_der_parsing.c | 14 +- .../secp256k1/contrib/lax_der_parsing.h | 10 +- .../contrib/lax_der_privatekey_parsing.c | 14 +- .../contrib/lax_der_privatekey_parsing.h | 8 +- .../depend/secp256k1/doc/ellswift.md | 12 +- .../depend/secp256k1/doc/release-process.md | 11 +- .../depend/secp256k1/examples/ecdh.c | 26 +- .../depend/secp256k1/examples/ecdsa.c | 34 +- .../depend/secp256k1/examples/schnorr.c | 40 +- .../depend/secp256k1/include/secp256k1.h | 302 +- .../depend/secp256k1/include/secp256k1_ecdh.h | 26 +- .../secp256k1/include/secp256k1_ellswift.h | 52 +- .../secp256k1/include/secp256k1_extrakeys.h | 114 +- .../include/secp256k1_preallocated.h | 62 +- .../secp256k1/include/secp256k1_recovery.h | 48 +- .../secp256k1/include/secp256k1_schnorrsig.h | 58 +- .../secp256k1/sage/gen_exhaustive_groups.sage | 6 +- .../sage/gen_split_lambda_constants.sage | 16 +- .../sage/prove_group_implementations.sage | 50 +- .../depend/secp256k1/src/CMakeLists.txt | 50 +- .../secp256k1/src/asm/field_10x26_arm.s | 21 +- .../depend/secp256k1/src/assumptions.h | 2 +- secp256k1-sys/depend/secp256k1/src/bench.c | 40 +- .../depend/secp256k1/src/bench_ecmult.c | 122 +- .../depend/secp256k1/src/bench_internal.c | 195 +- .../depend/secp256k1/src/ctime_tests.c | 70 +- secp256k1-sys/depend/secp256k1/src/ecdsa.h | 8 +- .../depend/secp256k1/src/ecdsa_impl.h | 135 +- secp256k1-sys/depend/secp256k1/src/eckey.h | 12 +- .../depend/secp256k1/src/eckey_impl.h | 66 +- secp256k1-sys/depend/secp256k1/src/ecmult.h | 6 +- .../secp256k1/src/ecmult_compute_table.h | 6 +- .../secp256k1/src/ecmult_compute_table_impl.h | 36 +- .../depend/secp256k1/src/ecmult_const.h | 14 +- .../depend/secp256k1/src/ecmult_const_impl.h | 451 +- .../depend/secp256k1/src/ecmult_gen.h | 14 +- .../secp256k1/src/ecmult_gen_compute_table.h | 2 +- .../src/ecmult_gen_compute_table_impl.h | 42 +- .../depend/secp256k1/src/ecmult_gen_impl.h | 86 +- .../depend/secp256k1/src/ecmult_impl.h | 406 +- secp256k1-sys/depend/secp256k1/src/field.h | 159 +- .../depend/secp256k1/src/field_10x26.h | 4 +- .../depend/secp256k1/src/field_10x26_impl.h | 128 +- .../depend/secp256k1/src/field_5x52.h | 4 +- .../secp256k1/src/field_5x52_asm_impl.h | 504 -- .../depend/secp256k1/src/field_5x52_impl.h | 120 +- .../secp256k1/src/field_5x52_int128_impl.h | 163 +- 
.../depend/secp256k1/src/field_impl.h | 428 +- secp256k1-sys/depend/secp256k1/src/group.h | 104 +- .../depend/secp256k1/src/group_impl.h | 792 +-- secp256k1-sys/depend/secp256k1/src/hash.h | 26 +- .../depend/secp256k1/src/hash_impl.h | 160 +- secp256k1-sys/depend/secp256k1/src/int128.h | 38 +- .../depend/secp256k1/src/int128_native.h | 4 +- .../depend/secp256k1/src/int128_native_impl.h | 38 +- .../depend/secp256k1/src/int128_struct.h | 4 +- .../depend/secp256k1/src/int128_struct_impl.h | 68 +- secp256k1-sys/depend/secp256k1/src/modinv32.h | 14 +- .../depend/secp256k1/src/modinv32_impl.h | 227 +- secp256k1-sys/depend/secp256k1/src/modinv64.h | 14 +- .../depend/secp256k1/src/modinv64_impl.h | 456 +- .../src/modules/ecdh/Makefile.am.include | 2 +- .../secp256k1/src/modules/ecdh/bench_impl.h | 12 +- .../secp256k1/src/modules/ecdh/main_impl.h | 46 +- .../secp256k1/src/modules/ecdh/tests_impl.h | 99 +- .../src/modules/ellswift/Makefile.am.include | 2 +- .../src/modules/ellswift/bench_impl.h | 26 +- .../src/modules/ellswift/main_impl.h | 389 +- .../modules/ellswift/tests_exhaustive_impl.h | 20 +- .../src/modules/ellswift/tests_impl.h | 214 +- .../src/modules/extrakeys/Makefile.am.include | 2 +- .../src/modules/extrakeys/main_impl.h | 162 +- .../modules/extrakeys/tests_exhaustive_impl.h | 46 +- .../src/modules/extrakeys/tests_impl.h | 573 +-- .../src/modules/recovery/Makefile.am.include | 2 +- .../src/modules/recovery/bench_impl.h | 16 +- .../src/modules/recovery/main_impl.h | 116 +- .../modules/recovery/tests_exhaustive_impl.h | 80 +- .../src/modules/recovery/tests_impl.h | 267 +- .../modules/schnorrsig/Makefile.am.include | 2 +- .../src/modules/schnorrsig/bench_impl.h | 30 +- .../src/modules/schnorrsig/main_impl.h | 174 +- .../schnorrsig/tests_exhaustive_impl.h | 66 +- .../src/modules/schnorrsig/tests_impl.h | 351 +- .../depend/secp256k1/src/precompute_ecmult.c | 18 +- .../secp256k1/src/precompute_ecmult_gen.c | 6 +- .../depend/secp256k1/src/precomputed_ecmult.c | 8 +- .../depend/secp256k1/src/precomputed_ecmult.h | 8 +- .../secp256k1/src/precomputed_ecmult_gen.c | 2 +- .../secp256k1/src/precomputed_ecmult_gen.h | 4 +- secp256k1-sys/depend/secp256k1/src/scalar.h | 60 +- .../depend/secp256k1/src/scalar_4x64.h | 2 +- .../depend/secp256k1/src/scalar_4x64_impl.h | 451 +- .../depend/secp256k1/src/scalar_8x32.h | 2 +- .../depend/secp256k1/src/scalar_8x32_impl.h | 292 +- .../depend/secp256k1/src/scalar_impl.h | 94 +- .../depend/secp256k1/src/scalar_low.h | 13 +- .../depend/secp256k1/src/scalar_low_impl.h | 135 +- secp256k1-sys/depend/secp256k1/src/scratch.h | 20 +- .../depend/secp256k1/src/scratch_impl.h | 26 +- .../depend/secp256k1/src/secp256k1.c | 500 +- secp256k1-sys/depend/secp256k1/src/selftest.h | 16 +- secp256k1-sys/depend/secp256k1/src/testrand.h | 22 +- .../depend/secp256k1/src/testrand_impl.h | 72 +- secp256k1-sys/depend/secp256k1/src/tests.c | 4284 ++++++++--------- .../depend/secp256k1/src/tests_exhaustive.c | 313 +- secp256k1-sys/depend/secp256k1/src/testutil.h | 29 + secp256k1-sys/depend/secp256k1/src/util.h | 63 +- .../depend/secp256k1/src/util.h.orig | 380 ++ .../src/wycheproof/WYCHEPROOF_COPYING | 8 +- .../depend/secp256k1/tools/check-abi.sh | 64 + secp256k1-sys/src/lib.rs | 128 +- secp256k1-sys/src/recovery.rs | 10 +- 132 files changed, 8089 insertions(+), 8160 deletions(-) create mode 100644 secp256k1-sys/depend/secp256k1/.cirrus.yml create mode 100644 secp256k1-sys/depend/secp256k1/CONTRIBUTING.md create mode 100644 secp256k1-sys/depend/secp256k1/cmake/GeneratePkgConfigFile.cmake 
delete mode 100644 secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h create mode 100644 secp256k1-sys/depend/secp256k1/src/testutil.h create mode 100644 secp256k1-sys/depend/secp256k1/src/util.h.orig create mode 100755 secp256k1-sys/depend/secp256k1/tools/check-abi.sh diff --git a/Cargo-minimal.lock b/Cargo-minimal.lock index a8df44ee2..6fbb97f00 100644 --- a/Cargo-minimal.lock +++ b/Cargo-minimal.lock @@ -273,7 +273,7 @@ dependencies = [ [[package]] name = "secp256k1-sys" -version = "0.9.2" +version = "0.10.0" dependencies = [ "cc", "libc", diff --git a/Cargo-recent.lock b/Cargo-recent.lock index c98018af2..c05a8b2ef 100644 --- a/Cargo-recent.lock +++ b/Cargo-recent.lock @@ -194,7 +194,7 @@ dependencies = [ [[package]] name = "secp256k1-sys" -version = "0.9.2" +version = "0.10.0" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index 80cecd1d9..2a7af1b04 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,7 +36,7 @@ global-context = ["std"] global-context-less-secure = ["global-context"] [dependencies] -secp256k1-sys = { version = "0.9.2", default-features = false, path = "./secp256k1-sys" } +secp256k1-sys = { version = "0.10.0", default-features = false, path = "./secp256k1-sys" } serde = { version = "1.0.103", default-features = false, optional = true } # You likely only want to enable these if you explicitly do not want to use "std", otherwise enable diff --git a/secp256k1-sys/CHANGELOG.md b/secp256k1-sys/CHANGELOG.md index 77e84853d..d2ec70422 100644 --- a/secp256k1-sys/CHANGELOG.md +++ b/secp256k1-sys/CHANGELOG.md @@ -1,6 +1,7 @@ -# Unreleased +# 0.10.0 - 2024-03-28 -* Bump MSRV to Rust `v1.56.1` +* Bump MSRV to Rust `v1.56.1` [#693](https://github.com/rust-bitcoin/rust-secp256k1/pull/693) +* Vendor `secp256k1 v0.4.1` [#688](https://github.com/rust-bitcoin/rust-secp256k1/pull/688) # 0.9.2 - 2023-12-18 diff --git a/secp256k1-sys/Cargo.toml b/secp256k1-sys/Cargo.toml index 2ca55b53d..5d591ffcb 100644 --- a/secp256k1-sys/Cargo.toml +++ b/secp256k1-sys/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "secp256k1-sys" -version = "0.9.2" +version = "0.10.0" authors = [ "Dawid Ciężarkiewicz ", "Andrew Poelstra ", "Steven Roose " ] @@ -12,7 +12,7 @@ description = "FFI for Pieter Wuille's `libsecp256k1` library." keywords = [ "secp256k1", "libsecp256k1", "ffi" ] readme = "README.md" build = "build.rs" -links = "rustsecp256k1_v0_9_2" +links = "rustsecp256k1_v0_10_0" edition = "2021" rust-version = "1.56.1" diff --git a/secp256k1-sys/depend/secp256k1-HEAD-revision.txt b/secp256k1-sys/depend/secp256k1-HEAD-revision.txt index 96fcb1e98..03d4132a9 100644 --- a/secp256k1-sys/depend/secp256k1-HEAD-revision.txt +++ b/secp256k1-sys/depend/secp256k1-HEAD-revision.txt @@ -1,2 +1,2 @@ # This file was automatically created by vendor-libsecp.sh -199d27cea32203b224b208627533c2e813cd3b21 +1ad5185cd42c0636104129fcc9f6a4bf9c67cc40 diff --git a/secp256k1-sys/depend/secp256k1/.cirrus.yml b/secp256k1-sys/depend/secp256k1/.cirrus.yml new file mode 100644 index 000000000..04aa8f240 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/.cirrus.yml @@ -0,0 +1,95 @@ +env: + ### cirrus config + CIRRUS_CLONE_DEPTH: 1 + ### compiler options + HOST: + WRAPPER_CMD: + # Specific warnings can be disabled with -Wno-error=foo. + # -pedantic-errors is not equivalent to -Werror=pedantic and thus not implied by -Werror according to the GCC manual. 
+ WERROR_CFLAGS: -Werror -pedantic-errors + MAKEFLAGS: -j4 + BUILD: check + ### secp256k1 config + ECMULTWINDOW: auto + ECMULTGENPRECISION: auto + ASM: no + WIDEMUL: auto + WITH_VALGRIND: yes + EXTRAFLAGS: + ### secp256k1 modules + EXPERIMENTAL: no + ECDH: no + RECOVERY: no + SCHNORRSIG: no + ELLSWIFT: no + ### test options + SECP256K1_TEST_ITERS: + BENCH: yes + SECP256K1_BENCH_ITERS: 2 + CTIMETESTS: yes + # Compile and run the tests + EXAMPLES: yes + +cat_logs_snippet: &CAT_LOGS + always: + cat_tests_log_script: + - cat tests.log || true + cat_noverify_tests_log_script: + - cat noverify_tests.log || true + cat_exhaustive_tests_log_script: + - cat exhaustive_tests.log || true + cat_ctime_tests_log_script: + - cat ctime_tests.log || true + cat_bench_log_script: + - cat bench.log || true + cat_config_log_script: + - cat config.log || true + cat_test_env_script: + - cat test_env.log || true + cat_ci_env_script: + - env + +linux_arm64_container_snippet: &LINUX_ARM64_CONTAINER + env_script: + - env | tee /tmp/env + build_script: + - DOCKER_BUILDKIT=1 docker build --file "ci/linux-debian.Dockerfile" --tag="ci_secp256k1_arm" + - docker image prune --force # Cleanup stale layers + test_script: + - docker run --rm --mount "type=bind,src=./,dst=/ci_secp256k1" --env-file /tmp/env --replace --name "ci_secp256k1_arm" "ci_secp256k1_arm" bash -c "cd /ci_secp256k1/ && ./ci/ci.sh" + +task: + name: "ARM64: Linux (Debian stable)" + persistent_worker: + labels: + type: arm64 + env: + ECDH: yes + RECOVERY: yes + SCHNORRSIG: yes + ELLSWIFT: yes + matrix: + # Currently only gcc-snapshot, the other compilers are tested on GHA with QEMU + - env: { CC: 'gcc-snapshot' } + << : *LINUX_ARM64_CONTAINER + << : *CAT_LOGS + +task: + name: "ARM64: Linux (Debian stable), Valgrind" + persistent_worker: + labels: + type: arm64 + env: + ECDH: yes + RECOVERY: yes + SCHNORRSIG: yes + ELLSWIFT: yes + WRAPPER_CMD: 'valgrind --error-exitcode=42' + SECP256K1_TEST_ITERS: 2 + matrix: + - env: { CC: 'gcc' } + - env: { CC: 'clang' } + - env: { CC: 'gcc-snapshot' } + - env: { CC: 'clang-snapshot' } + << : *LINUX_ARM64_CONTAINER + << : *CAT_LOGS diff --git a/secp256k1-sys/depend/secp256k1/.github/actions/run-in-docker-action/action.yml b/secp256k1-sys/depend/secp256k1/.github/actions/run-in-docker-action/action.yml index d357c3cf7..dbfaa4fec 100644 --- a/secp256k1-sys/depend/secp256k1/.github/actions/run-in-docker-action/action.yml +++ b/secp256k1-sys/depend/secp256k1/.github/actions/run-in-docker-action/action.yml @@ -14,9 +14,9 @@ inputs: runs: using: "composite" steps: - - uses: docker/setup-buildx-action@v2 + - uses: docker/setup-buildx-action@v3 - - uses: docker/build-push-action@v4 + - uses: docker/build-push-action@v5 id: main_builder continue-on-error: true with: @@ -26,7 +26,7 @@ runs: load: true cache-from: type=gha - - uses: docker/build-push-action@v4 + - uses: docker/build-push-action@v5 id: retry_builder if: steps.main_builder.outcome == 'failure' with: diff --git a/secp256k1-sys/depend/secp256k1/.github/workflows/ci.yml b/secp256k1-sys/depend/secp256k1/.github/workflows/ci.yml index b9a9eaa82..4ad905af5 100644 --- a/secp256k1-sys/depend/secp256k1/.github/workflows/ci.yml +++ b/secp256k1-sys/depend/secp256k1/.github/workflows/ci.yml @@ -47,14 +47,14 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: # See: https://github.com/moby/buildkit/issues/3969. 
driver-opts: | network=host - name: Build container - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: file: ./ci/linux-debian.Dockerfile tags: linux-debian-image @@ -792,7 +792,7 @@ jobs: - name: Check installation with Autotools env: - CI_INSTALL: ${{ runner.temp }}/${{ github.run_id }}${{ github.action }} + CI_INSTALL: ${{ runner.temp }}/${{ github.run_id }}${{ github.action }}/install run: | ./autogen.sh && ./configure --prefix=${{ env.CI_INSTALL }} && make clean && make install && ls -RlAh ${{ env.CI_INSTALL }} gcc -o ecdsa examples/ecdsa.c $(PKG_CONFIG_PATH=${{ env.CI_INSTALL }}/lib/pkgconfig pkg-config --cflags --libs libsecp256k1) -Wl,-rpath,"${{ env.CI_INSTALL }}/lib" && ./ecdsa diff --git a/secp256k1-sys/depend/secp256k1/CHANGELOG.md b/secp256k1-sys/depend/secp256k1/CHANGELOG.md index e8d8db5a1..e42420f67 100644 --- a/secp256k1-sys/depend/secp256k1/CHANGELOG.md +++ b/secp256k1-sys/depend/secp256k1/CHANGELOG.md @@ -5,6 +5,15 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.4.1] - 2023-12-21 + +#### Changed + - The point multiplication algorithm used for ECDH operations (module `ecdh`) was replaced with a slightly faster one. + - Optional handwritten x86_64 assembly for field operations was removed because modern C compilers are able to output more efficient assembly. This change results in a significant speedup of some library functions when handwritten x86_64 assembly is enabled (`--with-asm=x86_64` in GNU Autotools, `-DSECP256K1_ASM=x86_64` in CMake), which is the default on x86_64. Benchmarks with GCC 10.5.0 show a 10% speedup for `secp256k1_ecdsa_verify` and `secp256k1_schnorrsig_verify`. + +#### ABI Compatibility +The ABI is backward compatible with versions 0.4.0 and 0.3.x. + ## [0.4.0] - 2023-09-04 #### Added @@ -104,7 +113,7 @@ This version was in fact never released. The number was given by the build system since the introduction of autotools in Jan 2014 (ea0fe5a5bf0c04f9cc955b2966b614f5f378c6f6). Therefore, this version number does not uniquely identify a set of source files. -[unreleased]: https://github.com/bitcoin-core/secp256k1/compare/v0.4.0...HEAD +[0.4.1]: https://github.com/bitcoin-core/secp256k1/compare/v0.4.0...v0.4.1 [0.4.0]: https://github.com/bitcoin-core/secp256k1/compare/v0.3.2...v0.4.0 [0.3.2]: https://github.com/bitcoin-core/secp256k1/compare/v0.3.1...v0.3.2 [0.3.1]: https://github.com/bitcoin-core/secp256k1/compare/v0.3.0...v0.3.1 diff --git a/secp256k1-sys/depend/secp256k1/CMakeLists.txt b/secp256k1-sys/depend/secp256k1/CMakeLists.txt index cdac47ba9..b7aa20c18 100644 --- a/secp256k1-sys/depend/secp256k1/CMakeLists.txt +++ b/secp256k1-sys/depend/secp256k1/CMakeLists.txt @@ -11,7 +11,7 @@ project(libsecp256k1 # The package (a.k.a. release) version is based on semantic versioning 2.0.0 of # the API. All changes in experimental modules are treated as # backwards-compatible and therefore at most increase the minor version. - VERSION 0.4.0 + VERSION 0.4.1 DESCRIPTION "Optimized C library for ECDSA signatures and secret/public key operations on curve secp256k1." HOMEPAGE_URL "https://github.com/bitcoin-core/secp256k1" LANGUAGES C @@ -35,7 +35,7 @@ endif() # All changes in experimental modules are treated as if they don't affect the # interface and therefore only increase the revision. 
set(${PROJECT_NAME}_LIB_VERSION_CURRENT 3) -set(${PROJECT_NAME}_LIB_VERSION_REVISION 0) +set(${PROJECT_NAME}_LIB_VERSION_REVISION 1) set(${PROJECT_NAME}_LIB_VERSION_AGE 1) set(CMAKE_C_STANDARD 90) @@ -107,7 +107,7 @@ if(SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY) endif() mark_as_advanced(FORCE SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY) -set(SECP256K1_ASM "AUTO" CACHE STRING "Assembly optimizations to use: \"AUTO\", \"OFF\", \"x86_64\" or \"arm32\" (experimental). [default=AUTO]") +set(SECP256K1_ASM "AUTO" CACHE STRING "Assembly to use: \"AUTO\", \"OFF\", \"x86_64\" or \"arm32\" (experimental). [default=AUTO]") set_property(CACHE SECP256K1_ASM PROPERTY STRINGS "AUTO" "OFF" "x86_64" "arm32") check_string_option_value(SECP256K1_ASM) if(SECP256K1_ASM STREQUAL "arm32") @@ -117,7 +117,7 @@ if(SECP256K1_ASM STREQUAL "arm32") if(HAVE_ARM32_ASM) add_compile_definitions(USE_EXTERNAL_ASM=1) else() - message(FATAL_ERROR "ARM32 assembly optimization requested but not available.") + message(FATAL_ERROR "ARM32 assembly requested but not available.") endif() elseif(SECP256K1_ASM) include(CheckX86_64Assembly) @@ -128,14 +128,14 @@ elseif(SECP256K1_ASM) elseif(SECP256K1_ASM STREQUAL "AUTO") set(SECP256K1_ASM "OFF") else() - message(FATAL_ERROR "x86_64 assembly optimization requested but not available.") + message(FATAL_ERROR "x86_64 assembly requested but not available.") endif() endif() option(SECP256K1_EXPERIMENTAL "Allow experimental configuration options." OFF) if(NOT SECP256K1_EXPERIMENTAL) if(SECP256K1_ASM STREQUAL "arm32") - message(FATAL_ERROR "ARM32 assembly optimization is experimental. Use -DSECP256K1_EXPERIMENTAL=ON to allow.") + message(FATAL_ERROR "ARM32 assembly is experimental. Use -DSECP256K1_EXPERIMENTAL=ON to allow.") endif() endif() @@ -280,7 +280,7 @@ message("Parameters:") message(" ecmult window size .................. ${SECP256K1_ECMULT_WINDOW_SIZE}") message(" ecmult gen precision bits ........... ${SECP256K1_ECMULT_GEN_PREC_BITS}") message("Optional features:") -message(" assembly optimization ............... ${SECP256K1_ASM}") +message(" assembly ............................ ${SECP256K1_ASM}") message(" external callbacks .................. ${SECP256K1_USE_EXTERNAL_DEFAULT_CALLBACKS}") if(SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY) message(" wide multiplication (test-only) ..... ${SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY}") diff --git a/secp256k1-sys/depend/secp256k1/CONTRIBUTING.md b/secp256k1-sys/depend/secp256k1/CONTRIBUTING.md new file mode 100644 index 000000000..921704ff5 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/CONTRIBUTING.md @@ -0,0 +1,107 @@ +# Contributing to libsecp256k1 + +## Scope + +libsecp256k1 is a library for elliptic curve cryptography on the curve secp256k1, not a general-purpose cryptography library. +The library primarily serves the needs of the Bitcoin Core project but provides additional functionality for the benefit of the wider Bitcoin ecosystem. + +## Adding new functionality or modules + +The libsecp256k1 project welcomes contributions in the form of new functionality or modules, provided they are within the project's scope. + +It is the responsibility of the contributors to convince the maintainers that the proposed functionality is within the project's scope, high-quality and maintainable. +Contributors are recommended to provide the following in addition to the new code: + +* **Specification:** + A specification can help significantly in reviewing the new code as it provides documentation and context. 
+  It may justify various design decisions, give a motivation and outline security goals.
+  If the specification contains pseudocode, a reference implementation or test vectors, these can be used to compare with the proposed libsecp256k1 code.
+* **Security Arguments:**
+  In addition to defining the security goals, it should be argued that the new functionality meets these goals.
+  Depending on the nature of the new functionality, a wide range of security arguments are acceptable, ranging from being "obviously secure" to rigorous proofs of security.
+* **Relevance Arguments:**
+  The relevance of the new functionality for the Bitcoin ecosystem should be argued by outlining clear use cases.
+
+These are not the only factors taken into account when considering whether to add new functionality.
+The proposed new libsecp256k1 code must be of high quality, including API documentation and tests, as well as featuring a misuse-resistant API design.
+
+We recommend reaching out to other contributors (see [Communication Channels](#communication-channels)) and getting feedback before implementing new functionality.
+
+## Communication channels
+
+Most communication about libsecp256k1 occurs on the GitHub repository: in issues, pull requests, or on the discussion board.
+
+Additionally, there is an IRC channel dedicated to libsecp256k1, with biweekly meetings (see channel topic).
+The channel is `#secp256k1` on Libera Chat.
+The easiest way to participate on IRC is with the web client, [web.libera.chat](https://web.libera.chat/#secp256k1).
+Chat history logs can be found at https://gnusha.org/secp256k1/.
+
+## Contributor workflow & peer review
+
+The Contributor Workflow & Peer Review in libsecp256k1 are similar to Bitcoin Core's workflow and review processes described in its [CONTRIBUTING.md](https://github.com/bitcoin/bitcoin/blob/master/CONTRIBUTING.md).
+
+### Coding conventions
+
+In addition, libsecp256k1 tries to maintain the following coding conventions:
+
+* No runtime heap allocation (e.g., no `malloc`) unless explicitly requested by the caller (via `rustsecp256k1_v0_10_0_context_create` or `rustsecp256k1_v0_10_0_scratch_space_create`, for example). Moreover, it should be possible to use the library without any heap allocations.
+* The tests should cover all lines and branches of the library (see [Test coverage](#coverage)).
+* Operations involving secret data should be tested for being constant time with respect to the secrets (see [src/ctime_tests.c](src/ctime_tests.c)).
+* Local variables containing secret data should be cleared explicitly to try to delete secrets from memory.
+* Use `rustsecp256k1_v0_10_0_memcmp_var` instead of `memcmp` (see [#823](https://github.com/bitcoin-core/secp256k1/issues/823)).
+
+#### Style conventions
+
+* Commits should be atomic and diffs should be easy to read. For this reason, do not mix any formatting fixes or code moves with actual code changes. Make sure each individual commit is hygienic: that it builds successfully on its own without warnings, errors, regressions, or test failures.
+* New code should adhere to the style of existing, in particular surrounding, code. Other than that, we do not enforce strict rules for code formatting.
+* The code conforms to C89. Most notably, that means that only `/* ... */` comments are allowed (no `//` line comments). Moreover, any declarations in a `{ ... }` block (e.g., a function) must appear at the beginning of the block before any statements. When you would like to declare a variable in the middle of a block, you can open a new block:
+  ```C
+  void secp256k_foo(void) {
+      unsigned int x;              /* declaration */
+      int y = 2*x;                 /* declaration */
+      x = 17;                      /* statement */
+      {
+          int a, b;                /* declaration */
+          a = x + y;               /* statement */
+          secp256k_bar(x, &b);     /* statement */
+      }
+  }
+  ```
+* Use `unsigned int` instead of just `unsigned`.
+* Use `void *ptr` instead of `void* ptr`.
+* Arguments of the publicly-facing API must have a specific order defined in [include/secp256k1.h](include/secp256k1.h).
+* User-facing comment lines in headers should be limited to 80 chars if possible.
+* All identifiers in file scope should start with `rustsecp256k1_v0_10_0_`.
+* Avoid trailing whitespace.
+
+### Tests
+
+#### Coverage
+
+This library aims to have full coverage of reachable lines and branches.
+
+To create a test coverage report, configure with `--enable-coverage` (use of GCC is necessary):
+
+    $ ./configure --enable-coverage
+
+Run the tests:
+
+    $ make check
+
+To create a report, `gcovr` is recommended, as it includes branch coverage reporting:
+
+    $ gcovr --exclude 'src/bench*' --print-summary
+
+To create an HTML report with coloured and annotated source code:
+
+    $ mkdir -p coverage
+    $ gcovr --exclude 'src/bench*' --html --html-details -o coverage/coverage.html
+
+#### Exhaustive tests
+
+There are tests of several functions in which a small group replaces secp256k1.
+These tests are *exhaustive* since they provide all elements and scalars of the small group as input arguments (see [src/tests_exhaustive.c](src/tests_exhaustive.c)).
+
+### Benchmarks
+
+See `src/bench*.c` for examples of benchmarks.
diff --git a/secp256k1-sys/depend/secp256k1/Makefile.am b/secp256k1-sys/depend/secp256k1/Makefile.am
index 1bf783395..a26faca78 100644
--- a/secp256k1-sys/depend/secp256k1/Makefile.am
+++ b/secp256k1-sys/depend/secp256k1/Makefile.am
@@ -6,7 +6,7 @@ AM_CFLAGS = $(SECP_CFLAGS)
 lib_LTLIBRARIES = libsecp256k1.la
 include_HEADERS = include/secp256k1.h
-include_HEADERS += include/rustsecp256k1_v0_9_2_preallocated.h
+include_HEADERS += include/rustsecp256k1_v0_10_0_preallocated.h
 noinst_HEADERS =
 noinst_HEADERS += src/scalar.h
 noinst_HEADERS += src/scalar_4x64.h
@@ -37,7 +37,6 @@ noinst_HEADERS += src/field_10x26_impl.h
 noinst_HEADERS += src/field_5x52.h
 noinst_HEADERS += src/field_5x52_impl.h
 noinst_HEADERS += src/field_5x52_int128_impl.h
-noinst_HEADERS += src/field_5x52_asm_impl.h
 noinst_HEADERS += src/modinv32.h
 noinst_HEADERS += src/modinv32_impl.h
 noinst_HEADERS += src/modinv64.h
@@ -46,6 +45,7 @@ noinst_HEADERS += src/precomputed_ecmult.h
 noinst_HEADERS += src/precomputed_ecmult_gen.h
 noinst_HEADERS += src/assumptions.h
 noinst_HEADERS += src/checkmem.h
+noinst_HEADERS += src/testutil.h
 noinst_HEADERS += src/util.h
 noinst_HEADERS += src/int128.h
 noinst_HEADERS += src/int128_impl.h
@@ -63,22 +63,22 @@ noinst_HEADERS += src/hash_impl.h
 noinst_HEADERS += src/field.h
 noinst_HEADERS += src/field_impl.h
 noinst_HEADERS += src/bench.h
-noinst_HEADERS += src/wycheproof/ecdsa_rustsecp256k1_v0_9_2_sha256_bitcoin_test.h
+noinst_HEADERS += src/wycheproof/ecdsa_rustsecp256k1_v0_10_0_sha256_bitcoin_test.h
 noinst_HEADERS += contrib/lax_der_parsing.h
 noinst_HEADERS += contrib/lax_der_parsing.c
 noinst_HEADERS += contrib/lax_der_privatekey_parsing.h
 noinst_HEADERS += contrib/lax_der_privatekey_parsing.c
 noinst_HEADERS += examples/examples_util.h

-PRECOMPUTED_LIB = librustsecp256k1_v0_9_2_precomputed.la
+PRECOMPUTED_LIB = librustsecp256k1_v0_10_0_precomputed.la
noinst_LTLIBRARIES = $(PRECOMPUTED_LIB) -librustsecp256k1_v0_9_2_precomputed_la_SOURCES = src/precomputed_ecmult.c src/precomputed_ecmult_gen.c -# We need `-I$(top_srcdir)/src` in VPATH builds if librustsecp256k1_v0_9_2_precomputed_la_SOURCES have been recreated in the build tree. +librustsecp256k1_v0_10_0_precomputed_la_SOURCES = src/precomputed_ecmult.c src/precomputed_ecmult_gen.c +# We need `-I$(top_srcdir)/src` in VPATH builds if librustsecp256k1_v0_10_0_precomputed_la_SOURCES have been recreated in the build tree. # This helps users and packagers who insist on recreating the precomputed files (e.g., Gentoo). -librustsecp256k1_v0_9_2_precomputed_la_CPPFLAGS = -I$(top_srcdir)/src $(SECP_CONFIG_DEFINES) +librustsecp256k1_v0_10_0_precomputed_la_CPPFLAGS = -I$(top_srcdir)/src $(SECP_CONFIG_DEFINES) if USE_EXTERNAL_ASM -COMMON_LIB = librustsecp256k1_v0_9_2_common.la +COMMON_LIB = librustsecp256k1_v0_10_0_common.la else COMMON_LIB = endif @@ -89,14 +89,14 @@ pkgconfig_DATA = libsecp256k1.pc if USE_EXTERNAL_ASM if USE_ASM_ARM -librustsecp256k1_v0_9_2_common_la_SOURCES = src/asm/field_10x26_arm.s +librustsecp256k1_v0_10_0_common_la_SOURCES = src/asm/field_10x26_arm.s endif endif -librustsecp256k1_v0_9_2_la_SOURCES = src/secp256k1.c -librustsecp256k1_v0_9_2_la_CPPFLAGS = $(SECP_CONFIG_DEFINES) -librustsecp256k1_v0_9_2_la_LIBADD = $(COMMON_LIB) $(PRECOMPUTED_LIB) -librustsecp256k1_v0_9_2_la_LDFLAGS = -no-undefined -version-info $(LIB_VERSION_CURRENT):$(LIB_VERSION_REVISION):$(LIB_VERSION_AGE) +librustsecp256k1_v0_10_0_la_SOURCES = src/secp256k1.c +librustsecp256k1_v0_10_0_la_CPPFLAGS = $(SECP_CONFIG_DEFINES) +librustsecp256k1_v0_10_0_la_LIBADD = $(COMMON_LIB) $(PRECOMPUTED_LIB) +librustsecp256k1_v0_10_0_la_LDFLAGS = -no-undefined -version-info $(LIB_VERSION_CURRENT):$(LIB_VERSION_REVISION):$(LIB_VERSION_AGE) noinst_PROGRAMS = if USE_BENCHMARK @@ -223,11 +223,11 @@ maintainer-clean-local: clean-precomp ### Pregenerated test vectors ### (see the comments in the previous section for detailed rationale) -TESTVECTORS = src/wycheproof/ecdsa_rustsecp256k1_v0_9_2_sha256_bitcoin_test.h +TESTVECTORS = src/wycheproof/ecdsa_rustsecp256k1_v0_10_0_sha256_bitcoin_test.h -src/wycheproof/ecdsa_rustsecp256k1_v0_9_2_sha256_bitcoin_test.h: +src/wycheproof/ecdsa_rustsecp256k1_v0_10_0_sha256_bitcoin_test.h: mkdir -p $(@D) - python3 $(top_srcdir)/tools/tests_wycheproof_generate.py $(top_srcdir)/src/wycheproof/ecdsa_rustsecp256k1_v0_9_2_sha256_bitcoin_test.json > $@ + python3 $(top_srcdir)/tools/tests_wycheproof_generate.py $(top_srcdir)/src/wycheproof/ecdsa_rustsecp256k1_v0_10_0_sha256_bitcoin_test.json > $@ testvectors: $(TESTVECTORS) @@ -246,10 +246,10 @@ EXTRA_DIST += sage/gen_exhaustive_groups.sage EXTRA_DIST += sage/gen_split_lambda_constants.sage EXTRA_DIST += sage/group_prover.sage EXTRA_DIST += sage/prove_group_implementations.sage -EXTRA_DIST += sage/rustsecp256k1_v0_9_2_params.sage +EXTRA_DIST += sage/rustsecp256k1_v0_10_0_params.sage EXTRA_DIST += sage/weierstrass_prover.sage EXTRA_DIST += src/wycheproof/WYCHEPROOF_COPYING -EXTRA_DIST += src/wycheproof/ecdsa_rustsecp256k1_v0_9_2_sha256_bitcoin_test.json +EXTRA_DIST += src/wycheproof/ecdsa_rustsecp256k1_v0_10_0_sha256_bitcoin_test.json EXTRA_DIST += tools/tests_wycheproof_generate.py if ENABLE_MODULE_ECDH diff --git a/secp256k1-sys/depend/secp256k1/README.md b/secp256k1-sys/depend/secp256k1/README.md index 19dabe850..4013e6a93 100644 --- a/secp256k1-sys/depend/secp256k1/README.md +++ b/secp256k1-sys/depend/secp256k1/README.md @@ -1,11 +1,10 @@ libsecp256k1 
============ -[![Build Status](https://api.cirrus-ci.com/github/bitcoin-core/secp256k1.svg?branch=master)](https://cirrus-ci.com/github/bitcoin-core/secp256k1) ![Dependencies: None](https://img.shields.io/badge/dependencies-none-success) [![irc.libera.chat #secp256k1](https://img.shields.io/badge/irc.libera.chat-%23secp256k1-success)](https://web.libera.chat/#secp256k1) -Optimized C library for ECDSA signatures and secret/public key operations on curve secp256k1. +High-performance high-assurance C library for digital signatures and other cryptographic primitives on the secp256k1 elliptic curve. This library is intended to be the highest quality publicly available library for cryptography on the secp256k1 curve. However, the primary focus of its development has been for usage in the Bitcoin system and usage unlike Bitcoin's may be less well tested, verified, or suffer from a less well thought out interface. Correct usage requires some care and consideration that the library is fit for your application's purpose. @@ -34,7 +33,7 @@ Implementation details * Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.") * Field operations * Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1). - * Using 5 52-bit limbs (including hand-optimized assembly for x86_64, by Diederik Huys). + * Using 5 52-bit limbs * Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan). * This is an experimental feature that has not received enough scrutiny to satisfy the standard of quality of this library but is made available for testing and review by the community. * Scalar operations @@ -117,28 +116,6 @@ Usage examples can be found in the [examples](examples) directory. To compile th To compile the Schnorr signature and ECDH examples, you also need to configure with `--enable-module-schnorrsig` and `--enable-module-ecdh`. -Test coverage ------------ - -This library aims to have full coverage of the reachable lines and branches. - -To create a test coverage report, configure with `--enable-coverage` (use of GCC is necessary): - - $ ./configure --enable-coverage - -Run the tests: - - $ make check - -To create a report, `gcovr` is recommended, as it includes branch coverage reporting: - - $ gcovr --exclude 'src/bench*' --print-summary - -To create a HTML report with coloured and annotated source code: - - $ mkdir -p coverage - $ gcovr --exclude 'src/bench*' --html --html-details -o coverage/coverage.html - Benchmark ------------ If configured with `--enable-benchmark` (which is the default), binaries for benchmarking the libsecp256k1 functions will be present in the root directory after the build. @@ -155,3 +132,8 @@ Reporting a vulnerability ------------ See [SECURITY.md](SECURITY.md) + +Contributing to libsecp256k1 +------------ + +See [CONTRIBUTING.md](CONTRIBUTING.md) diff --git a/secp256k1-sys/depend/secp256k1/ci/ci.sh b/secp256k1-sys/depend/secp256k1/ci/ci.sh index 719e7851e..9cc715955 100755 --- a/secp256k1-sys/depend/secp256k1/ci/ci.sh +++ b/secp256k1-sys/depend/secp256k1/ci/ci.sh @@ -83,7 +83,21 @@ esac --host="$HOST" $EXTRAFLAGS # We have set "-j" in MAKEFLAGS. -make +build_exit_code=0 +make > make.log 2>&1 || build_exit_code=$? 
+cat make.log +if [ $build_exit_code -ne 0 ]; then + case "${CC:-undefined}" in + *snapshot*) + # Ignore internal compiler errors in gcc-snapshot and clang-snapshot + grep -e "internal compiler error:" -e "PLEASE submit a bug report" make.log + return $?; + ;; + *) + return 1; + ;; + esac +fi # Print information about binaries so that we can see that the architecture is correct file *tests* || true diff --git a/secp256k1-sys/depend/secp256k1/ci/linux-debian.Dockerfile b/secp256k1-sys/depend/secp256k1/ci/linux-debian.Dockerfile index e719907e8..5ce715b41 100644 --- a/secp256k1-sys/depend/secp256k1/ci/linux-debian.Dockerfile +++ b/secp256k1-sys/depend/secp256k1/ci/linux-debian.Dockerfile @@ -29,11 +29,15 @@ RUN apt-get update && apt-get install --no-install-recommends -y \ gcc-i686-linux-gnu libc6-dev-i386-cross libc6-dbg:i386 libubsan1:i386 libasan8:i386 \ gcc-s390x-linux-gnu libc6-dev-s390x-cross libc6-dbg:s390x \ gcc-arm-linux-gnueabihf libc6-dev-armhf-cross libc6-dbg:armhf \ - gcc-aarch64-linux-gnu libc6-dev-arm64-cross libc6-dbg:arm64 \ gcc-powerpc64le-linux-gnu libc6-dev-ppc64el-cross libc6-dbg:ppc64el \ gcc-mingw-w64-x86-64-win32 wine64 wine \ gcc-mingw-w64-i686-win32 wine32 \ - python3 + python3 && \ + if ! ( dpkg --print-architecture | grep --quiet "arm64" ) ; then \ + apt-get install --no-install-recommends -y \ + gcc-aarch64-linux-gnu libc6-dev-arm64-cross libc6-dbg:arm64 ;\ + fi && \ + apt-get clean && rm -rf /var/lib/apt/lists/* # Build and install gcc snapshot ARG GCC_SNAPSHOT_MAJOR=14 @@ -44,7 +48,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y wget libgmp-dev sha512sum --check --ignore-missing sha512.sum && \ # We should have downloaded exactly one tar.xz file ls && \ - [[ $(ls *.tar.xz | wc -l) -eq "1" ]] && \ + [ $(ls *.tar.xz | wc -l) -eq "1" ] && \ tar xf *.tar.xz && \ mkdir gcc-build && cd gcc-build && \ ../*/configure --prefix=/opt/gcc-snapshot --enable-languages=c --disable-bootstrap --disable-multilib --without-isl && \ diff --git a/secp256k1-sys/depend/secp256k1/cmake/GeneratePkgConfigFile.cmake b/secp256k1-sys/depend/secp256k1/cmake/GeneratePkgConfigFile.cmake new file mode 100644 index 000000000..9c1d7f1dd --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/cmake/GeneratePkgConfigFile.cmake @@ -0,0 +1,8 @@ +function(generate_pkg_config_file in_file) + set(prefix ${CMAKE_INSTALL_PREFIX}) + set(exec_prefix \${prefix}) + set(libdir \${exec_prefix}/${CMAKE_INSTALL_LIBDIR}) + set(includedir \${prefix}/${CMAKE_INSTALL_INCLUDEDIR}) + set(PACKAGE_VERSION ${PROJECT_VERSION}) + configure_file(${in_file} ${PROJECT_NAME}.pc @ONLY) +endfunction() diff --git a/secp256k1-sys/depend/secp256k1/cmake/TryAppendCFlags.cmake b/secp256k1-sys/depend/secp256k1/cmake/TryAppendCFlags.cmake index 66c76ac6e..718eb91a3 100644 --- a/secp256k1-sys/depend/secp256k1/cmake/TryAppendCFlags.cmake +++ b/secp256k1-sys/depend/secp256k1/cmake/TryAppendCFlags.cmake @@ -1,6 +1,6 @@ include(CheckCCompilerFlag) -function(rustsecp256k1_v0_9_2_check_c_flags_internal flags output) +function(rustsecp256k1_v0_10_0_check_c_flags_internal flags output) string(MAKE_C_IDENTIFIER "${flags}" result) string(TOUPPER "${result}" result) set(result "C_SUPPORTS_${result}") @@ -17,7 +17,7 @@ endfunction() # Append flags to the COMPILE_OPTIONS directory property if CC accepts them. 
macro(try_append_c_flags) - rustsecp256k1_v0_9_2_check_c_flags_internal("${ARGV}" result) + rustsecp256k1_v0_10_0_check_c_flags_internal("${ARGV}" result) if(result) add_compile_options(${ARGV}) endif() diff --git a/secp256k1-sys/depend/secp256k1/configure.ac b/secp256k1-sys/depend/secp256k1/configure.ac index e3877850d..51ac230cf 100644 --- a/secp256k1-sys/depend/secp256k1/configure.ac +++ b/secp256k1-sys/depend/secp256k1/configure.ac @@ -5,7 +5,7 @@ AC_PREREQ([2.60]) # backwards-compatible and therefore at most increase the minor version. define(_PKG_VERSION_MAJOR, 0) define(_PKG_VERSION_MINOR, 4) -define(_PKG_VERSION_PATCH, 0) +define(_PKG_VERSION_PATCH, 1) define(_PKG_VERSION_IS_RELEASE, true) # The library version is based on libtool versioning of the ABI. The set of @@ -14,7 +14,7 @@ define(_PKG_VERSION_IS_RELEASE, true) # All changes in experimental modules are treated as if they don't affect the # interface and therefore only increase the revision. define(_LIB_VERSION_CURRENT, 3) -define(_LIB_VERSION_REVISION, 0) +define(_LIB_VERSION_REVISION, 1) define(_LIB_VERSION_AGE, 1) AC_INIT([libsecp256k1],m4_join([.], _PKG_VERSION_MAJOR, _PKG_VERSION_MINOR, _PKG_VERSION_PATCH)m4_if(_PKG_VERSION_IS_RELEASE, [true], [], [-dev]),[https://github.com/bitcoin-core/secp256k1/issues],[libsecp256k1],[https://github.com/bitcoin-core/secp256k1]) @@ -201,7 +201,7 @@ AC_ARG_ENABLE(external_default_callbacks, AC_ARG_WITH([test-override-wide-multiply], [] ,[set_widemul=$withval], [set_widemul=auto]) AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm32|no|auto], -[assembly optimizations to use (experimental: arm32) [default=auto]])],[req_asm=$withval], [req_asm=auto]) +[assembly to use (experimental: arm32) [default=auto]])],[req_asm=$withval], [req_asm=auto]) AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE|auto], [window size for ecmult precomputation for verification, specified as integer in range [2..24].] @@ -279,24 +279,24 @@ else x86_64) SECP_X86_64_ASM_CHECK if test x"$has_x86_64_asm" != x"yes"; then - AC_MSG_ERROR([x86_64 assembly optimization requested but not available]) + AC_MSG_ERROR([x86_64 assembly requested but not available]) fi ;; arm32) SECP_ARM32_ASM_CHECK if test x"$has_arm32_asm" != x"yes"; then - AC_MSG_ERROR([ARM32 assembly optimization requested but not available]) + AC_MSG_ERROR([ARM32 assembly requested but not available]) fi ;; no) ;; *) - AC_MSG_ERROR([invalid assembly optimization selection]) + AC_MSG_ERROR([invalid assembly selection]) ;; esac fi -# Select assembly optimization +# Select assembly enable_external_asm=no case $set_asm in @@ -309,7 +309,7 @@ arm32) no) ;; *) - AC_MSG_ERROR([invalid assembly optimizations]) + AC_MSG_ERROR([invalid assembly selection]) ;; esac @@ -425,7 +425,7 @@ if test x"$enable_experimental" = x"yes"; then AC_MSG_NOTICE([******]) else if test x"$set_asm" = x"arm32"; then - AC_MSG_ERROR([ARM32 assembly optimization is experimental. Use --enable-experimental to allow.]) + AC_MSG_ERROR([ARM32 assembly is experimental. 
Use --enable-experimental to allow.]) fi fi diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c index 2d1bd9caf..187234fbc 100644 --- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c +++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c @@ -7,10 +7,10 @@ #include #include "lax_der_parsing.h" -extern int rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_ecdsa_signature *sig, const unsigned char *input64); -int rustsecp256k1_v0_9_2_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { +extern int rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_ecdsa_signature *sig, const unsigned char *input64); +int rustsecp256k1_v0_10_0_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { size_t rpos, rlen, spos, slen; size_t pos = 0; size_t lenbyte; @@ -18,7 +18,7 @@ int rustsecp256k1_v0_9_2_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_9_ int overflow = 0; /* Hack to initialize sig with a correctly-parsed but invalid signature. */ - rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact(ctx, sig, tmpsig); + rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig); /* Sequence tag byte */ if (pos == inputlen || input[pos] != 0x30) { @@ -139,11 +139,11 @@ int rustsecp256k1_v0_9_2_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_9_ } if (!overflow) { - overflow = !rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact(ctx, sig, tmpsig); + overflow = !rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig); } if (overflow) { memset(tmpsig, 0, 64); - rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact(ctx, sig, tmpsig); + rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig); } return 1; } diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h index 1f64a37bc..c1abda485 100644 --- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h +++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h @@ -26,8 +26,8 @@ * certain violations are easily supported. You may need to adapt it. * * Do not use this for new systems. Use well-defined DER or compact signatures - * instead if you have the choice (see rustsecp256k1_v0_9_2_ecdsa_signature_parse_der and - * rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact). + * instead if you have the choice (see rustsecp256k1_v0_10_0_ecdsa_signature_parse_der and + * rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact). * * The supported violations are: * - All numbers are parsed as nonnegative integers, even though X.609-0207 @@ -83,9 +83,9 @@ extern "C" { * encoded numbers are out of range, signature validation with it is * guaranteed to fail for every message and public key. 
*/ -int rustsecp256k1_v0_9_2_ecdsa_signature_parse_der_lax( - const rustsecp256k1_v0_9_2_context* ctx, - rustsecp256k1_v0_9_2_ecdsa_signature* sig, +int rustsecp256k1_v0_10_0_ecdsa_signature_parse_der_lax( + const rustsecp256k1_v0_10_0_context* ctx, + rustsecp256k1_v0_10_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c index 08c20727c..f19015b1d 100644 --- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c +++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c @@ -8,7 +8,7 @@ #include "lax_der_privatekey_parsing.h" -int ec_privkey_import_der(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) { +int ec_privkey_import_der(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) { const unsigned char *end = privkey + privkeylen; int lenb = 0; int len = 0; @@ -45,17 +45,17 @@ int ec_privkey_import_der(const rustsecp256k1_v0_9_2_context* ctx, unsigned char return 0; } if (privkey[1]) memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]); - if (!rustsecp256k1_v0_9_2_ec_seckey_verify(ctx, out32)) { + if (!rustsecp256k1_v0_10_0_ec_seckey_verify(ctx, out32)) { memset(out32, 0, 32); return 0; } return 1; } -int ec_privkey_export_der(const rustsecp256k1_v0_9_2_context *ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *key32, int compressed) { - rustsecp256k1_v0_9_2_pubkey pubkey; +int ec_privkey_export_der(const rustsecp256k1_v0_10_0_context *ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *key32, int compressed) { + rustsecp256k1_v0_10_0_pubkey pubkey; size_t pubkeylen = 0; - if (!rustsecp256k1_v0_9_2_ec_pubkey_create(ctx, &pubkey, key32)) { + if (!rustsecp256k1_v0_10_0_ec_pubkey_create(ctx, &pubkey, key32)) { *privkeylen = 0; return 0; } @@ -79,7 +79,7 @@ int ec_privkey_export_der(const rustsecp256k1_v0_9_2_context *ctx, unsigned char memcpy(ptr, key32, 32); ptr += 32; memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle); pubkeylen = 33; - rustsecp256k1_v0_9_2_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED); + rustsecp256k1_v0_10_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED); ptr += pubkeylen; *privkeylen = ptr - privkey; } else { @@ -104,7 +104,7 @@ int ec_privkey_export_der(const rustsecp256k1_v0_9_2_context *ctx, unsigned char memcpy(ptr, key32, 32); ptr += 32; memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle); pubkeylen = 65; - rustsecp256k1_v0_9_2_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED); + rustsecp256k1_v0_10_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED); ptr += pubkeylen; *privkeylen = ptr - privkey; } diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h index 3974c03b0..69b2555c8 100644 --- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h +++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h @@ -43,7 +43,7 @@ extern "C" { /** Export a private key in DER format. * * Returns: 1 if the private key was valid. 
- * Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_2_context_static). + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_10_0_context_static). * Out: privkey: pointer to an array for storing the private key in BER. * Should have space for 279 bytes, and cannot be NULL. * privkeylen: Pointer to an int where the length of the private key in @@ -57,10 +57,10 @@ extern "C" { * simple 32-byte private keys are sufficient. * * Note that this function does not guarantee correct DER output. It is - * guaranteed to be parsable by rustsecp256k1_v0_9_2_ec_privkey_import_der + * guaranteed to be parsable by rustsecp256k1_v0_10_0_ec_privkey_import_der */ SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der( - const rustsecp256k1_v0_9_2_context* ctx, + const rustsecp256k1_v0_10_0_context* ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *seckey, @@ -82,7 +82,7 @@ SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der( * key. */ SECP256K1_WARN_UNUSED_RESULT int ec_privkey_import_der( - const rustsecp256k1_v0_9_2_context* ctx, + const rustsecp256k1_v0_10_0_context* ctx, unsigned char *seckey, const unsigned char *privkey, size_t privkeylen diff --git a/secp256k1-sys/depend/secp256k1/doc/ellswift.md b/secp256k1-sys/depend/secp256k1/doc/ellswift.md index 6a0c97009..4c6c0da81 100644 --- a/secp256k1-sys/depend/secp256k1/doc/ellswift.md +++ b/secp256k1-sys/depend/secp256k1/doc/ellswift.md @@ -144,8 +144,8 @@ but the approach here is simple enough and gives fairly uniform output even in t **Note**: in the paper these conditions result in $\infty$ as output, due to the use of projective coordinates there. We wish to avoid the need for callers to deal with this special case. -This is implemented in `rustsecp256k1_v0_9_2_ellswift_xswiftec_frac_var` (which decodes to an x-coordinate represented as a fraction), and -in `rustsecp256k1_v0_9_2_ellswift_xswiftec_var` (which outputs the actual x-coordinate). +This is implemented in `rustsecp256k1_v0_10_0_ellswift_xswiftec_frac_var` (which decodes to an x-coordinate represented as a fraction), and +in `rustsecp256k1_v0_10_0_ellswift_xswiftec_var` (which outputs the actual x-coordinate). ## 3. The encoding function @@ -247,7 +247,7 @@ the loop can be simplified to only compute one of the inverses instead of all of * Let $t = G_{c,u}(x).$ * If $t \neq \bot$, return $(u, t)$; restart loop otherwise. -This is implemented in `rustsecp256k1_v0_9_2_ellswift_xelligatorswift_var`. +This is implemented in `rustsecp256k1_v0_10_0_ellswift_xelligatorswift_var`. ### 3.3 Finding the inverse @@ -388,7 +388,7 @@ Specialized for odd-ordered $a=0$ curves: * If $c \in \\{4, 6\\}:$ return $w(\frac{-\sqrt{-3}+1}{2}u + v).$ * If $c \in \\{5, 7\\}:$ return $w(\frac{-\sqrt{-3}-1}{2}u - v).$ -This is implemented in `rustsecp256k1_v0_9_2_ellswift_xswiftec_inv_var`. +This is implemented in `rustsecp256k1_v0_10_0_ellswift_xswiftec_inv_var`. And the x-only ElligatorSwift encoding algorithm is still: @@ -471,11 +471,11 @@ as decoder: * Let $y = \sqrt{g(x)}.$ * Return $(x, y)$ if $sign(y) = sign(t)$; $(x, -y)$ otherwise. -This is implemented in `rustsecp256k1_v0_9_2_ellswift_swiftec_var`. The used $sign(x)$ function is the parity of $x$ when represented as in integer in $[0,q).$ +This is implemented in `rustsecp256k1_v0_10_0_ellswift_swiftec_var`. 
The used $sign(x)$ function is the parity of $x$ when represented as an integer in $[0,q).$

 The corresponding encoder would invoke the x-only one, but negating the output $t$ if $sign(t) \neq sign(y).$

-This is implemented in `rustsecp256k1_v0_9_2_ellswift_elligatorswift_var`.
+This is implemented in `rustsecp256k1_v0_10_0_ellswift_elligatorswift_var`.

 Note that this is only intended for encoding points where both the x-coordinate and y-coordinate are unpredictable.
 When encoding x-only points where the y-coordinate is implicitly even (or implicitly square, or implicitly in $[0,q/2]$), the encoder in
diff --git a/secp256k1-sys/depend/secp256k1/doc/release-process.md b/secp256k1-sys/depend/secp256k1/doc/release-process.md
index ea6087c9f..51e337a5a 100644
--- a/secp256k1-sys/depend/secp256k1/doc/release-process.md
+++ b/secp256k1-sys/depend/secp256k1/doc/release-process.md
@@ -24,16 +24,21 @@ Perform these checks before creating a release:
 2. Check installation with autotools:
 ```shell
 dir=$(mktemp -d)
-./autogen.sh && ./configure --prefix=$dir && make clean && make install && ls -l $dir/include $dir/lib
+./autogen.sh && ./configure --prefix=$dir && make clean && make install && ls -RlAh $dir
 gcc -o ecdsa examples/ecdsa.c $(PKG_CONFIG_PATH=$dir/lib/pkgconfig pkg-config --cflags --libs libsecp256k1) -Wl,-rpath,"$dir/lib" && ./ecdsa
 ```
 3. Check installation with CMake:
 ```shell
 dir=$(mktemp -d)
 build=$(mktemp -d)
-cmake -B $build -DCMAKE_INSTALL_PREFIX=$dir && cmake --build $build --target install && ls -l $dir/include $dir/lib*
+cmake -B $build -DCMAKE_INSTALL_PREFIX=$dir && cmake --build $build --target install && ls -RlAh $dir
 gcc -o ecdsa examples/ecdsa.c -I $dir/include -L $dir/lib*/ -l secp256k1 -Wl,-rpath,"$dir/lib",-rpath,"$dir/lib64" && ./ecdsa
 ```
+4. Use the [`check-abi.sh`](/tools/check-abi.sh) tool to ensure there are no unexpected ABI incompatibilities and that the version number and release notes accurately reflect all potential ABI changes. To run this tool, the `abi-dumper` and `abi-compliance-checker` packages are required.
+
+```shell
+tools/check-abi.sh
+```

 ## Regular release

@@ -41,7 +46,7 @@ gcc -o ecdsa examples/ecdsa.c -I $dir/include -L $dir/lib*/ -l secp256k1 -Wl,-rp
   * finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) by
     * adding a section for the release (make sure that the version number is a link to a diff between the previous and new version),
     * removing the `[Unreleased]` section header, and
-    * including an entry for `### ABI Compatibility` if it doesn't exist that mentions the library soname of the release,
+    * including an entry for `### ABI Compatibility` if it doesn't exist,
   * sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`, and
   * if this is not a patch release
     * updates `_PKG_VERSION_*` and `_LIB_VERSION_*` in `configure.ac` and
diff --git a/secp256k1-sys/depend/secp256k1/examples/ecdh.c b/secp256k1-sys/depend/secp256k1/examples/ecdh.c
index 3472e4a5d..2f47a5d7f 100644
--- a/secp256k1-sys/depend/secp256k1/examples/ecdh.c
+++ b/secp256k1-sys/depend/secp256k1/examples/ecdh.c
@@ -26,19 +26,19 @@ int main(void) {
     unsigned char randomize[32];
     int return_val;
     size_t len;
-    rustsecp256k1_v0_9_2_pubkey pubkey1;
-    rustsecp256k1_v0_9_2_pubkey pubkey2;
+    rustsecp256k1_v0_10_0_pubkey pubkey1;
+    rustsecp256k1_v0_10_0_pubkey pubkey2;

     /* Before we can call actual API functions, we need to create a "context". 
*/ - rustsecp256k1_v0_9_2_context* ctx = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE); + rustsecp256k1_v0_10_0_context* ctx = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE); if (!fill_random(randomize, sizeof(randomize))) { printf("Failed to generate randomness\n"); return 1; } /* Randomizing the context is recommended to protect against side-channel - * leakage See `rustsecp256k1_v0_9_2_context_randomize` in secp256k1.h for more + * leakage See `rustsecp256k1_v0_10_0_context_randomize` in secp256k1.h for more * information about it. This should never fail. */ - return_val = rustsecp256k1_v0_9_2_context_randomize(ctx, randomize); + return_val = rustsecp256k1_v0_10_0_context_randomize(ctx, randomize); assert(return_val); /*** Key Generation ***/ @@ -51,27 +51,27 @@ int main(void) { printf("Failed to generate randomness\n"); return 1; } - if (rustsecp256k1_v0_9_2_ec_seckey_verify(ctx, seckey1) && rustsecp256k1_v0_9_2_ec_seckey_verify(ctx, seckey2)) { + if (rustsecp256k1_v0_10_0_ec_seckey_verify(ctx, seckey1) && rustsecp256k1_v0_10_0_ec_seckey_verify(ctx, seckey2)) { break; } } /* Public key creation using a valid context with a verified secret key should never fail */ - return_val = rustsecp256k1_v0_9_2_ec_pubkey_create(ctx, &pubkey1, seckey1); + return_val = rustsecp256k1_v0_10_0_ec_pubkey_create(ctx, &pubkey1, seckey1); assert(return_val); - return_val = rustsecp256k1_v0_9_2_ec_pubkey_create(ctx, &pubkey2, seckey2); + return_val = rustsecp256k1_v0_10_0_ec_pubkey_create(ctx, &pubkey2, seckey2); assert(return_val); /* Serialize pubkey1 in a compressed form (33 bytes), should always return 1 */ len = sizeof(compressed_pubkey1); - return_val = rustsecp256k1_v0_9_2_ec_pubkey_serialize(ctx, compressed_pubkey1, &len, &pubkey1, SECP256K1_EC_COMPRESSED); + return_val = rustsecp256k1_v0_10_0_ec_pubkey_serialize(ctx, compressed_pubkey1, &len, &pubkey1, SECP256K1_EC_COMPRESSED); assert(return_val); /* Should be the same size as the size of the output, because we passed a 33 byte array. */ assert(len == sizeof(compressed_pubkey1)); /* Serialize pubkey2 in a compressed form (33 bytes) */ len = sizeof(compressed_pubkey2); - return_val = rustsecp256k1_v0_9_2_ec_pubkey_serialize(ctx, compressed_pubkey2, &len, &pubkey2, SECP256K1_EC_COMPRESSED); + return_val = rustsecp256k1_v0_10_0_ec_pubkey_serialize(ctx, compressed_pubkey2, &len, &pubkey2, SECP256K1_EC_COMPRESSED); assert(return_val); /* Should be the same size as the size of the output, because we passed a 33 byte array. */ assert(len == sizeof(compressed_pubkey2)); @@ -80,12 +80,12 @@ int main(void) { /* Perform ECDH with seckey1 and pubkey2. Should never fail with a verified * seckey and valid pubkey */ - return_val = rustsecp256k1_v0_9_2_ecdh(ctx, shared_secret1, &pubkey2, seckey1, NULL, NULL); + return_val = rustsecp256k1_v0_10_0_ecdh(ctx, shared_secret1, &pubkey2, seckey1, NULL, NULL); assert(return_val); /* Perform ECDH with seckey2 and pubkey1. 
Should never fail with a verified * seckey and valid pubkey */ - return_val = rustsecp256k1_v0_9_2_ecdh(ctx, shared_secret2, &pubkey1, seckey2, NULL, NULL); + return_val = rustsecp256k1_v0_10_0_ecdh(ctx, shared_secret2, &pubkey1, seckey2, NULL, NULL); assert(return_val); /* Both parties should end up with the same shared secret */ @@ -104,7 +104,7 @@ int main(void) { print_hex(shared_secret1, sizeof(shared_secret1)); /* This will clear everything from the context and free the memory */ - rustsecp256k1_v0_9_2_context_destroy(ctx); + rustsecp256k1_v0_10_0_context_destroy(ctx); /* It's best practice to try to clear secrets from memory after using them. * This is done because some bugs can allow an attacker to leak memory, for diff --git a/secp256k1-sys/depend/secp256k1/examples/ecdsa.c b/secp256k1-sys/depend/secp256k1/examples/ecdsa.c index 2ce736d93..ea08558a5 100644 --- a/secp256k1-sys/depend/secp256k1/examples/ecdsa.c +++ b/secp256k1-sys/depend/secp256k1/examples/ecdsa.c @@ -34,18 +34,18 @@ int main(void) { size_t len; int is_signature_valid, is_signature_valid2; int return_val; - rustsecp256k1_v0_9_2_pubkey pubkey; - rustsecp256k1_v0_9_2_ecdsa_signature sig; + rustsecp256k1_v0_10_0_pubkey pubkey; + rustsecp256k1_v0_10_0_ecdsa_signature sig; /* Before we can call actual API functions, we need to create a "context". */ - rustsecp256k1_v0_9_2_context* ctx = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE); + rustsecp256k1_v0_10_0_context* ctx = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE); if (!fill_random(randomize, sizeof(randomize))) { printf("Failed to generate randomness\n"); return 1; } /* Randomizing the context is recommended to protect against side-channel - * leakage See `rustsecp256k1_v0_9_2_context_randomize` in secp256k1.h for more + * leakage. See `rustsecp256k1_v0_10_0_context_randomize` in secp256k1.h for more * information about it. This should never fail. */ - return_val = rustsecp256k1_v0_9_2_context_randomize(ctx, randomize); + return_val = rustsecp256k1_v0_10_0_context_randomize(ctx, randomize); assert(return_val); /*** Key Generation ***/ @@ -58,18 +58,18 @@ int main(void) { printf("Failed to generate randomness\n"); return 1; } - if (rustsecp256k1_v0_9_2_ec_seckey_verify(ctx, seckey)) { + if (rustsecp256k1_v0_10_0_ec_seckey_verify(ctx, seckey)) { break; } } /* Public key creation using a valid context with a verified secret key should never fail */ - return_val = rustsecp256k1_v0_9_2_ec_pubkey_create(ctx, &pubkey, seckey); + return_val = rustsecp256k1_v0_10_0_ec_pubkey_create(ctx, &pubkey, seckey); assert(return_val); /* Serialize the pubkey in a compressed form (33 bytes). Should always return 1. */ len = sizeof(compressed_pubkey); - return_val = rustsecp256k1_v0_9_2_ec_pubkey_serialize(ctx, compressed_pubkey, &len, &pubkey, SECP256K1_EC_COMPRESSED); + return_val = rustsecp256k1_v0_10_0_ec_pubkey_serialize(ctx, compressed_pubkey, &len, &pubkey, SECP256K1_EC_COMPRESSED); assert(return_val); /* Should be the same size as the size of the output, because we passed a 33 byte array. */ assert(len == sizeof(compressed_pubkey)); @@ -80,31 +80,31 @@ int main(void) { * custom nonce function, passing `NULL` will use the RFC-6979 safe default. * Signing with a valid context, verified secret key * and the default nonce function should never fail.
*/ - return_val = rustsecp256k1_v0_9_2_ecdsa_sign(ctx, &sig, msg_hash, seckey, NULL, NULL); + return_val = rustsecp256k1_v0_10_0_ecdsa_sign(ctx, &sig, msg_hash, seckey, NULL, NULL); assert(return_val); /* Serialize the signature in a compact form. Should always return 1 * according to the documentation in secp256k1.h. */ - return_val = rustsecp256k1_v0_9_2_ecdsa_signature_serialize_compact(ctx, serialized_signature, &sig); + return_val = rustsecp256k1_v0_10_0_ecdsa_signature_serialize_compact(ctx, serialized_signature, &sig); assert(return_val); /*** Verification ***/ /* Deserialize the signature. This will return 0 if the signature can't be parsed correctly. */ - if (!rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact(ctx, &sig, serialized_signature)) { + if (!rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact(ctx, &sig, serialized_signature)) { printf("Failed parsing the signature\n"); return 1; } /* Deserialize the public key. This will return 0 if the public key can't be parsed correctly. */ - if (!rustsecp256k1_v0_9_2_ec_pubkey_parse(ctx, &pubkey, compressed_pubkey, sizeof(compressed_pubkey))) { + if (!rustsecp256k1_v0_10_0_ec_pubkey_parse(ctx, &pubkey, compressed_pubkey, sizeof(compressed_pubkey))) { printf("Failed parsing the public key\n"); return 1; } /* Verify a signature. This will return 1 if it's valid and 0 if it's not. */ - is_signature_valid = rustsecp256k1_v0_9_2_ecdsa_verify(ctx, &sig, msg_hash, &pubkey); + is_signature_valid = rustsecp256k1_v0_10_0_ecdsa_verify(ctx, &sig, msg_hash, &pubkey); printf("Is the signature valid? %s\n", is_signature_valid ? "true" : "false"); printf("Secret Key: "); @@ -115,14 +115,14 @@ int main(void) { print_hex(serialized_signature, sizeof(serialized_signature)); /* This will clear everything from the context and free the memory */ - rustsecp256k1_v0_9_2_context_destroy(ctx); + rustsecp256k1_v0_10_0_context_destroy(ctx); /* Bonus example: if all we need is signature verification (and no key generation or signing), we don't need to use a context created via - rustsecp256k1_v0_9_2_context_create(). We can simply use the static (i.e., global) - context rustsecp256k1_v0_9_2_context_static. See its description in + rustsecp256k1_v0_10_0_context_create(). We can simply use the static (i.e., global) + context rustsecp256k1_v0_10_0_context_static. See its description in include/secp256k1.h for details. */ - is_signature_valid2 = rustsecp256k1_v0_9_2_ecdsa_verify(rustsecp256k1_v0_9_2_context_static, + is_signature_valid2 = rustsecp256k1_v0_10_0_ecdsa_verify(rustsecp256k1_v0_10_0_context_static, &sig, msg_hash, &pubkey); assert(is_signature_valid2 == is_signature_valid); diff --git a/secp256k1-sys/depend/secp256k1/examples/schnorr.c b/secp256k1-sys/depend/secp256k1/examples/schnorr.c index 020de1174..851dabe10 100644 --- a/secp256k1-sys/depend/secp256k1/examples/schnorr.c +++ b/secp256k1-sys/depend/secp256k1/examples/schnorr.c @@ -28,18 +28,18 @@ int main(void) { unsigned char signature[64]; int is_signature_valid, is_signature_valid2; int return_val; - rustsecp256k1_v0_9_2_xonly_pubkey pubkey; - rustsecp256k1_v0_9_2_keypair keypair; + rustsecp256k1_v0_10_0_xonly_pubkey pubkey; + rustsecp256k1_v0_10_0_keypair keypair; /* Before we can call actual API functions, we need to create a "context". 
*/ - rustsecp256k1_v0_9_2_context* ctx = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE); + rustsecp256k1_v0_10_0_context* ctx = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE); if (!fill_random(randomize, sizeof(randomize))) { printf("Failed to generate randomness\n"); return 1; } /* Randomizing the context is recommended to protect against side-channel - * leakage See `rustsecp256k1_v0_9_2_context_randomize` in secp256k1.h for more + * leakage. See `rustsecp256k1_v0_10_0_context_randomize` in secp256k1.h for more * information about it. This should never fail. */ - return_val = rustsecp256k1_v0_9_2_context_randomize(ctx, randomize); + return_val = rustsecp256k1_v0_10_0_context_randomize(ctx, randomize); assert(return_val); /*** Key Generation ***/ @@ -54,21 +54,21 @@ int main(void) { } /* Try to create a keypair with a valid context; it should only fail if * the secret key is zero or out of range. */ - if (rustsecp256k1_v0_9_2_keypair_create(ctx, &keypair, seckey)) { + if (rustsecp256k1_v0_10_0_keypair_create(ctx, &keypair, seckey)) { break; } } /* Extract the X-only public key from the keypair. We pass NULL for * `pk_parity` as the parity isn't needed for signing or verification. - * `rustsecp256k1_v0_9_2_keypair_xonly_pub` supports returning the parity for + * `rustsecp256k1_v0_10_0_keypair_xonly_pub` supports returning the parity for * other use cases such as tests or verifying Taproot tweaks. * This should never fail with a valid context and public key. */ - return_val = rustsecp256k1_v0_9_2_keypair_xonly_pub(ctx, &pubkey, NULL, &keypair); + return_val = rustsecp256k1_v0_10_0_keypair_xonly_pub(ctx, &pubkey, NULL, &keypair); assert(return_val); /* Serialize the public key. Should always return 1 for a valid public key. */ - return_val = rustsecp256k1_v0_9_2_xonly_pubkey_serialize(ctx, serialized_pubkey, &pubkey); + return_val = rustsecp256k1_v0_10_0_xonly_pubkey_serialize(ctx, serialized_pubkey, &pubkey); assert(return_val); /*** Signing ***/ @@ -76,7 +76,7 @@ int main(void) { /* Instead of signing (possibly very long) messages directly, we sign a * 32-byte hash of the message in this example. * - * We use rustsecp256k1_v0_9_2_tagged_sha256 to create this hash. This function expects + * We use rustsecp256k1_v0_10_0_tagged_sha256 to create this hash. This function expects * a context-specific "tag", which restricts the context in which the signed * messages should be considered valid. For example, if protocol A mandates * to use the tag "my_fancy_protocol" and protocol B mandates to use the tag @@ -87,7 +87,7 @@ int main(void) { * message that has intended consequences in the intended context (e.g., * protocol A) but would have unintended consequences if it were valid in * some other context (e.g., protocol B). */ - return_val = rustsecp256k1_v0_9_2_tagged_sha256(ctx, msg_hash, tag, sizeof(tag), msg, sizeof(msg)); + return_val = rustsecp256k1_v0_10_0_tagged_sha256(ctx, msg_hash, tag, sizeof(tag), msg, sizeof(msg)); assert(return_val); /* Generate 32 bytes of randomness to use with BIP-340 schnorr signing. */ @@ -98,30 +98,30 @@ int main(void) { /* Generate a Schnorr signature. * - * We use the rustsecp256k1_v0_9_2_schnorrsig_sign32 function that provides a simple + * We use the rustsecp256k1_v0_10_0_schnorrsig_sign32 function that provides a simple * interface for signing 32-byte messages (which in our case is a hash of * the actual message).
BIP-340 recommends passing 32 bytes of randomness * to the signing function to improve security against side-channel attacks. * Signing with a valid context, a 32-byte message, a verified keypair, and * any 32 bytes of auxiliary random data should never fail. */ - return_val = rustsecp256k1_v0_9_2_schnorrsig_sign32(ctx, signature, msg_hash, &keypair, auxiliary_rand); + return_val = rustsecp256k1_v0_10_0_schnorrsig_sign32(ctx, signature, msg_hash, &keypair, auxiliary_rand); assert(return_val); /*** Verification ***/ /* Deserialize the public key. This will return 0 if the public key can't * be parsed correctly */ - if (!rustsecp256k1_v0_9_2_xonly_pubkey_parse(ctx, &pubkey, serialized_pubkey)) { + if (!rustsecp256k1_v0_10_0_xonly_pubkey_parse(ctx, &pubkey, serialized_pubkey)) { printf("Failed parsing the public key\n"); return 1; } /* Compute the tagged hash on the received messages using the same tag as the signer. */ - return_val = rustsecp256k1_v0_9_2_tagged_sha256(ctx, msg_hash, tag, sizeof(tag), msg, sizeof(msg)); + return_val = rustsecp256k1_v0_10_0_tagged_sha256(ctx, msg_hash, tag, sizeof(tag), msg, sizeof(msg)); assert(return_val); /* Verify a signature. This will return 1 if it's valid and 0 if it's not. */ - is_signature_valid = rustsecp256k1_v0_9_2_schnorrsig_verify(ctx, signature, msg_hash, 32, &pubkey); + is_signature_valid = rustsecp256k1_v0_10_0_schnorrsig_verify(ctx, signature, msg_hash, 32, &pubkey); printf("Is the signature valid? %s\n", is_signature_valid ? "true" : "false"); @@ -133,14 +133,14 @@ int main(void) { print_hex(signature, sizeof(signature)); /* This will clear everything from the context and free the memory */ - rustsecp256k1_v0_9_2_context_destroy(ctx); + rustsecp256k1_v0_10_0_context_destroy(ctx); /* Bonus example: if all we need is signature verification (and no key generation or signing), we don't need to use a context created via - rustsecp256k1_v0_9_2_context_create(). We can simply use the static (i.e., global) - context rustsecp256k1_v0_9_2_context_static. See its description in + rustsecp256k1_v0_10_0_context_create(). We can simply use the static (i.e., global) + context rustsecp256k1_v0_10_0_context_static. See its description in include/secp256k1.h for details. */ - is_signature_valid2 = rustsecp256k1_v0_9_2_schnorrsig_verify(rustsecp256k1_v0_9_2_context_static, + is_signature_valid2 = rustsecp256k1_v0_10_0_schnorrsig_verify(rustsecp256k1_v0_10_0_context_static, signature, msg_hash, 32, &pubkey); assert(is_signature_valid2 == is_signature_valid); diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1.h b/secp256k1-sys/depend/secp256k1/include/secp256k1.h index 317b71871..4120257f9 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1.h @@ -29,25 +29,25 @@ extern "C" { * The primary purpose of context objects is to store randomization data for * enhanced protection against side-channel leakage. This protection is only * effective if the context is randomized after its creation. See - * rustsecp256k1_v0_9_2_context_create for creation of contexts and - * rustsecp256k1_v0_9_2_context_randomize for randomization. + * rustsecp256k1_v0_10_0_context_create for creation of contexts and + * rustsecp256k1_v0_10_0_context_randomize for randomization. * * A secondary purpose of context objects is to store pointers to callback * functions that the library will call when certain error states arise. 
See - * rustsecp256k1_v0_9_2_context_set_error_callback as well as - * rustsecp256k1_v0_9_2_context_set_illegal_callback for details. Future library versions + * rustsecp256k1_v0_10_0_context_set_error_callback as well as + * rustsecp256k1_v0_10_0_context_set_illegal_callback for details. Future library versions * may use context objects for additional purposes. * * A constructed context can safely be used from multiple threads * simultaneously, but API calls that take a non-const pointer to a context * need exclusive access to it. In particular this is the case for - * rustsecp256k1_v0_9_2_context_destroy, rustsecp256k1_v0_9_2_context_preallocated_destroy, - * and rustsecp256k1_v0_9_2_context_randomize. + * rustsecp256k1_v0_10_0_context_destroy, rustsecp256k1_v0_10_0_context_preallocated_destroy, + * and rustsecp256k1_v0_10_0_context_randomize. * * Regarding randomization, either do it once at creation time (in which case * you do not need any locking for the other calls), or use a read-write lock. */ -typedef struct rustsecp256k1_v0_9_2_context_struct rustsecp256k1_v0_9_2_context; +typedef struct rustsecp256k1_v0_10_0_context_struct rustsecp256k1_v0_10_0_context; /** Opaque data structure that holds rewritable "scratch space" * @@ -60,7 +60,7 @@ typedef struct rustsecp256k1_v0_9_2_context_struct rustsecp256k1_v0_9_2_context; * Unlike the context object, this cannot safely be shared between threads * without additional synchronization logic. */ -typedef struct rustsecp256k1_v0_9_2_scratch_space_struct rustsecp256k1_v0_9_2_scratch_space; +typedef struct rustsecp256k1_v0_10_0_scratch_space_struct rustsecp256k1_v0_10_0_scratch_space; /** Opaque data structure that holds a parsed and valid public key. * @@ -68,12 +68,12 @@ typedef struct rustsecp256k1_v0_9_2_scratch_space_struct rustsecp256k1_v0_9_2_sc * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. * If you need to convert to a format suitable for storage or transmission, - * use rustsecp256k1_v0_9_2_ec_pubkey_serialize and rustsecp256k1_v0_9_2_ec_pubkey_parse. To - * compare keys, use rustsecp256k1_v0_9_2_ec_pubkey_cmp. + * use rustsecp256k1_v0_10_0_ec_pubkey_serialize and rustsecp256k1_v0_10_0_ec_pubkey_parse. To + * compare keys, use rustsecp256k1_v0_10_0_ec_pubkey_cmp. */ typedef struct { unsigned char data[64]; -} rustsecp256k1_v0_9_2_pubkey; +} rustsecp256k1_v0_10_0_pubkey; /** Opaque data structure that holds a parsed ECDSA signature. * @@ -81,12 +81,12 @@ typedef struct { * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. * If you need to convert to a format suitable for storage, transmission, or - * comparison, use the rustsecp256k1_v0_9_2_ecdsa_signature_serialize_* and - * rustsecp256k1_v0_9_2_ecdsa_signature_parse_* functions. + * comparison, use the rustsecp256k1_v0_10_0_ecdsa_signature_serialize_* and + * rustsecp256k1_v0_10_0_ecdsa_signature_parse_* functions. */ typedef struct { unsigned char data[64]; -} rustsecp256k1_v0_9_2_ecdsa_signature; +} rustsecp256k1_v0_10_0_ecdsa_signature; /** A pointer to a function to deterministically generate a nonce. * @@ -104,7 +104,7 @@ typedef struct { * Except for test cases, this function should compute some cryptographic hash of * the message, the algorithm, the key and the attempt.
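The nonce-callback contract documented just above can be made concrete with a deliberately trivial implementation. The sketch below is ours, not part of the library; the trailing parameters (`algo16`, `data`, `attempt`) follow the upstream typedef, which the hunk here truncates. Real code should pass `noncefp = NULL` to get the RFC 6979 default.

```c
/* Sketch only: a nonce callback with the documented shape. Returning 1
 * accepts the nonce; returning 0 makes signing fail. This is NOT a
 * cryptographic hash of (message, algorithm, key, attempt) and must not
 * be used outside tests. */
static int test_nonce_fn(unsigned char *nonce32, const unsigned char *msg32,
                         const unsigned char *key32, const unsigned char *algo16,
                         void *data, unsigned int attempt) {
    unsigned int i;
    (void)algo16;
    (void)data;
    for (i = 0; i < 32; i++) {
        nonce32[i] = (unsigned char)(msg32[i] ^ key32[i] ^ (unsigned char)attempt);
    }
    return 1;
}
```

Such a function would be passed as the `noncefp` argument of `rustsecp256k1_v0_10_0_ecdsa_sign`, with `ndata` forwarded to its `data` parameter.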
*/ -typedef int (*rustsecp256k1_v0_9_2_nonce_function)( +typedef int (*rustsecp256k1_v0_10_0_nonce_function)( unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, @@ -200,8 +200,8 @@ typedef int (*rustsecp256k1_v0_9_2_nonce_function)( #define SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY (1 << 10) #define SECP256K1_FLAGS_BIT_COMPRESSION (1 << 8) -/** Context flags to pass to rustsecp256k1_v0_9_2_context_create, rustsecp256k1_v0_9_2_context_preallocated_size, and - * rustsecp256k1_v0_9_2_context_preallocated_create. */ +/** Context flags to pass to rustsecp256k1_v0_10_0_context_create, rustsecp256k1_v0_10_0_context_preallocated_size, and + * rustsecp256k1_v0_10_0_context_preallocated_create. */ #define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT) /** Deprecated context flags. These flags are treated equivalent to SECP256K1_CONTEXT_NONE. */ @@ -211,7 +211,7 @@ typedef int (*rustsecp256k1_v0_9_2_nonce_function)( /* Testing flag. Do not use. */ #define SECP256K1_CONTEXT_DECLASSIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY) -/** Flag to pass to rustsecp256k1_v0_9_2_ec_pubkey_serialize. */ +/** Flag to pass to rustsecp256k1_v0_10_0_ec_pubkey_serialize. */ #define SECP256K1_EC_COMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION | SECP256K1_FLAGS_BIT_COMPRESSION) #define SECP256K1_EC_UNCOMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION) @@ -223,20 +223,20 @@ typedef int (*rustsecp256k1_v0_9_2_nonce_function)( #define SECP256K1_TAG_PUBKEY_HYBRID_ODD 0x07 /** A built-in constant secp256k1 context object with static storage duration, to be - * used in conjunction with rustsecp256k1_v0_9_2_selftest. + * used in conjunction with rustsecp256k1_v0_10_0_selftest. * * This context object offers *only limited functionality*, i.e., it cannot be used * for API functions that perform computations involving secret keys, e.g., signing * and public key generation. If this restriction applies to a specific API function, - * it is mentioned in its documentation. See rustsecp256k1_v0_9_2_context_create if you need a + * it is mentioned in its documentation. See rustsecp256k1_v0_10_0_context_create if you need a * full context object that supports all functionality offered by the library. * - * It is highly recommended to call rustsecp256k1_v0_9_2_selftest before using this context. + * It is highly recommended to call rustsecp256k1_v0_10_0_selftest before using this context. */ -/** Deprecated alias for rustsecp256k1_v0_9_2_context_static. */ +/** Deprecated alias for rustsecp256k1_v0_10_0_context_static. */ -/** Perform basic self tests (to be used in conjunction with rustsecp256k1_v0_9_2_context_static) +/** Perform basic self tests (to be used in conjunction with rustsecp256k1_v0_10_0_context_static) * * This function performs self tests that detect some serious usage errors and * similar conditions, e.g., when the library is compiled for the wrong endianness. @@ -244,23 +244,23 @@ typedef int (*rustsecp256k1_v0_9_2_nonce_function)( * very rudimentary and are not intended as a replacement for running the test * binaries. * - * It is highly recommended to call this before using rustsecp256k1_v0_9_2_context_static. + * It is highly recommended to call this before using rustsecp256k1_v0_10_0_context_static.
* It is not necessary to call this function before using a context created with - * rustsecp256k1_v0_9_2_context_create (or rustsecp256k1_v0_9_2_context_preallocated_create), which will + * rustsecp256k1_v0_10_0_context_create (or rustsecp256k1_v0_10_0_context_preallocated_create), which will * take care of performing the self tests. * * If the tests fail, this function will call the default error handler to abort the - * program (see rustsecp256k1_v0_9_2_context_set_error_callback). + * program (see rustsecp256k1_v0_10_0_context_set_error_callback). */ -SECP256K1_API void rustsecp256k1_v0_9_2_selftest(void); +SECP256K1_API void rustsecp256k1_v0_10_0_selftest(void); /** Create a secp256k1 context object (in dynamically allocated memory). * * This function uses malloc to allocate memory. It is guaranteed that malloc is * called at most once for every call of this function. If you need to avoid dynamic - * memory allocation entirely, see rustsecp256k1_v0_9_2_context_static and the functions in - * rustsecp256k1_v0_9_2_preallocated.h. + * memory allocation entirely, see rustsecp256k1_v0_10_0_context_static and the functions in + * rustsecp256k1_v0_10_0_preallocated.h. * * Returns: a newly created context object. * In: flags: Always set to SECP256K1_CONTEXT_NONE (see below). @@ -273,9 +273,9 @@ SECP256K1_API void rustsecp256k1_v0_9_2_selftest(void); * * If the context is intended to be used for API functions that perform computations * involving secret keys, e.g., signing and public key generation, then it is highly - * recommended to call rustsecp256k1_v0_9_2_context_randomize on the context before calling + * recommended to call rustsecp256k1_v0_10_0_context_randomize on the context before calling * those API functions. This will provide enhanced protection against side-channel - * leakage, see rustsecp256k1_v0_9_2_context_randomize for details. + * leakage, see rustsecp256k1_v0_10_0_context_randomize for details. * * Do not create a new context object for each operation, as construction and * randomization can take non-negligible time. @@ -284,27 +284,27 @@ SECP256K1_API void rustsecp256k1_v0_9_2_selftest(void); * * This function uses malloc to allocate memory. It is guaranteed that malloc is * called at most once for every call of this function. If you need to avoid dynamic - * memory allocation entirely, see the functions in rustsecp256k1_v0_9_2_preallocated.h. + * memory allocation entirely, see the functions in rustsecp256k1_v0_10_0_preallocated.h. * - * Cloning rustsecp256k1_v0_9_2_context_static is not possible, and should not be emulated by + * Cloning rustsecp256k1_v0_10_0_context_static is not possible, and should not be emulated by * the caller (e.g., using memcpy). Create a new context instead. * * Returns: a newly created context object. - * Args: ctx: an existing context to copy (not rustsecp256k1_v0_9_2_context_static) + * Args: ctx: an existing context to copy (not rustsecp256k1_v0_10_0_context_static) */ /** Destroy a secp256k1 context object (created in dynamically allocated memory). * * The context pointer may not be used afterwards. * - * The context to destroy must have been created using rustsecp256k1_v0_9_2_context_create - * or rustsecp256k1_v0_9_2_context_clone. If the context has instead been created using - * rustsecp256k1_v0_9_2_context_preallocated_create or rustsecp256k1_v0_9_2_context_preallocated_clone, the - * behaviour is undefined. 
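The selftest and static-context guidance above suggests a verification-only pattern that needs no heap allocation at all. A minimal sketch, assuming the vendored secp256k1.h is included and that `sig`, `msg_hash`, and `pubkey` were produced as in examples/ecdsa.c (the function name is ours):

```c
/* Verify a signature without ever creating a context. */
static int verify_only(const rustsecp256k1_v0_10_0_ecdsa_signature *sig,
                       const unsigned char *msg_hash,
                       const rustsecp256k1_v0_10_0_pubkey *pubkey) {
    /* Recommended before the first use of the static context. */
    rustsecp256k1_v0_10_0_selftest();
    /* The static context suffices: verification involves no secret keys. */
    return rustsecp256k1_v0_10_0_ecdsa_verify(rustsecp256k1_v0_10_0_context_static,
                                              sig, msg_hash, pubkey);
}
```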
In that case, rustsecp256k1_v0_9_2_context_preallocated_destroy must + * The context to destroy must have been created using rustsecp256k1_v0_10_0_context_create + * or rustsecp256k1_v0_10_0_context_clone. If the context has instead been created using + * rustsecp256k1_v0_10_0_context_preallocated_create or rustsecp256k1_v0_10_0_context_preallocated_clone, the + * behaviour is undefined. In that case, rustsecp256k1_v0_10_0_context_preallocated_destroy must * be used instead. * * Args: ctx: an existing context to destroy, constructed using - * rustsecp256k1_v0_9_2_context_create or rustsecp256k1_v0_9_2_context_clone - * (i.e., not rustsecp256k1_v0_9_2_context_static). + * rustsecp256k1_v0_10_0_context_create or rustsecp256k1_v0_10_0_context_clone + * (i.e., not rustsecp256k1_v0_10_0_context_static). */ /** Set a callback function to be called when an illegal argument is passed to * an API call. It will only trigger for violations that are mentioned @@ -327,11 +327,11 @@ SECP256K1_API void rustsecp256k1_v0_9_2_selftest(void); * USE_EXTERNAL_DEFAULT_CALLBACKS is defined, which is the case if the build * has been configured with --enable-external-default-callbacks. Then the * following two symbols must be provided to link against: - * - void rustsecp256k1_v0_9_2_default_illegal_callback_fn(const char *message, void *data); - * - void rustsecp256k1_v0_9_2_default_error_callback_fn(const char *message, void *data); + * - void rustsecp256k1_v0_10_0_default_illegal_callback_fn(const char *message, void *data); + * - void rustsecp256k1_v0_10_0_default_error_callback_fn(const char *message, void *data); * The library can call these default handlers even before a proper callback data - * pointer could have been set using rustsecp256k1_v0_9_2_context_set_illegal_callback or - * rustsecp256k1_v0_9_2_context_set_error_callback, e.g., when the creation of a context + * pointer could have been set using rustsecp256k1_v0_10_0_context_set_illegal_callback or + * rustsecp256k1_v0_10_0_context_set_error_callback, e.g., when the creation of a context * fails. In this case, the corresponding default handler will be called with * the data pointer argument set to NULL. * @@ -341,10 +341,10 @@ SECP256K1_API void rustsecp256k1_v0_9_2_selftest(void); * (NULL restores the default handler.) * data: the opaque pointer to pass to fun above, must be NULL for the default handler. * - * See also rustsecp256k1_v0_9_2_context_set_error_callback. + * See also rustsecp256k1_v0_10_0_context_set_error_callback. */ -SECP256K1_API void rustsecp256k1_v0_9_2_context_set_illegal_callback( - rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API void rustsecp256k1_v0_10_0_context_set_illegal_callback( + rustsecp256k1_v0_10_0_context *ctx, void (*fun)(const char *message, void *data), const void *data ) SECP256K1_ARG_NONNULL(1); @@ -358,21 +358,21 @@ SECP256K1_API void rustsecp256k1_v0_9_2_context_set_illegal_callback( * This can only trigger in case of a hardware failure, miscompilation, * memory corruption, serious bug in the library, or other error that can * otherwise result in undefined behaviour. It will not trigger due to mere - * incorrect usage of the API (see rustsecp256k1_v0_9_2_context_set_illegal_callback + * incorrect usage of the API (see rustsecp256k1_v0_10_0_context_set_illegal_callback * for that). After this callback returns, anything may happen, including * crashing. * * Args: ctx: an existing context object.
* In: fun: a pointer to a function to call when an internal error occurs, * taking a message and an opaque pointer (NULL restores the - * default handler, see rustsecp256k1_v0_9_2_context_set_illegal_callback + * default handler, see rustsecp256k1_v0_10_0_context_set_illegal_callback * for details). * data: the opaque pointer to pass to fun above, must be NULL for the default handler. * - * See also rustsecp256k1_v0_9_2_context_set_illegal_callback. + * See also rustsecp256k1_v0_10_0_context_set_illegal_callback. */ -SECP256K1_API void rustsecp256k1_v0_9_2_context_set_error_callback( - rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API void rustsecp256k1_v0_10_0_context_set_error_callback( + rustsecp256k1_v0_10_0_context *ctx, void (*fun)(const char *message, void *data), const void *data ) SECP256K1_ARG_NONNULL(1); @@ -404,9 +404,9 @@ SECP256K1_API void rustsecp256k1_v0_9_2_context_set_error_callback( * 0x03), uncompressed (65 bytes, header byte 0x04), or hybrid (65 bytes, header * byte 0x06 or 0x07) format public keys. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_pubkey_parse( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ec_pubkey_parse( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_pubkey *pubkey, const unsigned char *input, size_t inputlen ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -421,16 +421,16 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_pubkey_pa * In/Out: outputlen: a pointer to an integer which is initially set to the * size of output, and is overwritten with the written * size. - * In: pubkey: a pointer to a rustsecp256k1_v0_9_2_pubkey containing an + * In: pubkey: a pointer to a rustsecp256k1_v0_10_0_pubkey containing an * initialized public key. * flags: SECP256K1_EC_COMPRESSED if serialization should be in * compressed format, otherwise SECP256K1_EC_UNCOMPRESSED. */ -SECP256K1_API int rustsecp256k1_v0_9_2_ec_pubkey_serialize( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API int rustsecp256k1_v0_10_0_ec_pubkey_serialize( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *output, size_t *outputlen, - const rustsecp256k1_v0_9_2_pubkey *pubkey, + const rustsecp256k1_v0_10_0_pubkey *pubkey, unsigned int flags ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); @@ -443,10 +443,10 @@ SECP256K1_API int rustsecp256k1_v0_9_2_ec_pubkey_serialize( * In: pubkey1: first public key to compare * pubkey2: second public key to compare */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_pubkey_cmp( - const rustsecp256k1_v0_9_2_context *ctx, - const rustsecp256k1_v0_9_2_pubkey *pubkey1, - const rustsecp256k1_v0_9_2_pubkey *pubkey2 +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ec_pubkey_cmp( + const rustsecp256k1_v0_10_0_context *ctx, + const rustsecp256k1_v0_10_0_pubkey *pubkey1, + const rustsecp256k1_v0_10_0_pubkey *pubkey2 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Parse an ECDSA signature in compact (64 bytes) format. @@ -464,9 +464,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_pubkey_cm * S are zero, the resulting sig value is guaranteed to fail verification for * any message and public key. 
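A sketch of installing the two callbacks documented above; both handlers use the documented `(const char *message, void *data)` shape, and the handler names are ours. Per the docs, the internal-error callback should not attempt to continue, so it aborts:

```c
#include <stdio.h>
#include <stdlib.h>

static void log_illegal(const char *message, void *data) {
    (void)data;
    /* Triggered on documented API misuse. */
    fprintf(stderr, "secp256k1 illegal argument: %s\n", message);
}

static void die_on_internal_error(const char *message, void *data) {
    (void)data;
    fprintf(stderr, "secp256k1 internal error: %s\n", message);
    abort(); /* "anything may happen" after returning, so do not return */
}

static void install_handlers(rustsecp256k1_v0_10_0_context *ctx) {
    rustsecp256k1_v0_10_0_context_set_illegal_callback(ctx, log_illegal, NULL);
    rustsecp256k1_v0_10_0_context_set_error_callback(ctx, die_on_internal_error, NULL);
}
```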
*/ -SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_ecdsa_signature *sig, +SECP256K1_API int rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_ecdsa_signature *sig, const unsigned char *input64 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -485,9 +485,9 @@ SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact( * encoded numbers are out of range, signature verification with it is * guaranteed to fail for every message and public key. */ -SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_signature_parse_der( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_ecdsa_signature *sig, +SECP256K1_API int rustsecp256k1_v0_10_0_ecdsa_signature_parse_der( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_ecdsa_signature *sig, const unsigned char *input, size_t inputlen ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -503,11 +503,11 @@ SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_signature_parse_der( * if 0 was returned). * In: sig: a pointer to an initialized signature object */ -SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API int rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *output, size_t *outputlen, - const rustsecp256k1_v0_9_2_ecdsa_signature *sig + const rustsecp256k1_v0_10_0_ecdsa_signature *sig ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Serialize an ECDSA signature in compact (64 byte) format. @@ -517,12 +517,12 @@ SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der( * Out: output64: a pointer to a 64-byte array to store the compact serialization * In: sig: a pointer to an initialized signature object * - * See rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact for details about the encoding. + * See rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact for details about the encoding. */ -SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_signature_serialize_compact( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API int rustsecp256k1_v0_10_0_ecdsa_signature_serialize_compact( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *output64, - const rustsecp256k1_v0_9_2_ecdsa_signature *sig + const rustsecp256k1_v0_10_0_ecdsa_signature *sig ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Verify an ECDSA signature. @@ -545,16 +545,16 @@ SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_signature_serialize_compact( * form are accepted. * * If you need to accept ECDSA signatures from sources that do not obey this - * rule, apply rustsecp256k1_v0_9_2_ecdsa_signature_normalize to the signature prior to + * rule, apply rustsecp256k1_v0_10_0_ecdsa_signature_normalize to the signature prior to * verification, but be aware that doing so results in malleable signatures. * * For details, see the comments for that function. 
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ecdsa_verify( - const rustsecp256k1_v0_9_2_context *ctx, - const rustsecp256k1_v0_9_2_ecdsa_signature *sig, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ecdsa_verify( + const rustsecp256k1_v0_10_0_context *ctx, + const rustsecp256k1_v0_10_0_ecdsa_signature *sig, const unsigned char *msghash32, - const rustsecp256k1_v0_9_2_pubkey *pubkey + const rustsecp256k1_v0_10_0_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Convert a signature to a normalized lower-S form. @@ -593,15 +593,15 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ecdsa_verify * accept various non-unique encodings, so care should be taken when this * property is required for an application. * - * The rustsecp256k1_v0_9_2_ecdsa_sign function will by default create signatures in the - * lower-S form, and rustsecp256k1_v0_9_2_ecdsa_verify will not accept others. In case + * The rustsecp256k1_v0_10_0_ecdsa_sign function will by default create signatures in the + * lower-S form, and rustsecp256k1_v0_10_0_ecdsa_verify will not accept others. In case * signatures come from a system that cannot enforce this property, - * rustsecp256k1_v0_9_2_ecdsa_signature_normalize must be called before verification. + * rustsecp256k1_v0_10_0_ecdsa_signature_normalize must be called before verification. */ -SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_signature_normalize( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_ecdsa_signature *sigout, - const rustsecp256k1_v0_9_2_ecdsa_signature *sigin +SECP256K1_API int rustsecp256k1_v0_10_0_ecdsa_signature_normalize( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_ecdsa_signature *sigout, + const rustsecp256k1_v0_10_0_ecdsa_signature *sigin ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(3); /** An implementation of RFC6979 (using HMAC-SHA256) as a nonce generation function. * extra entropy. */ -/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_9_2_nonce_function_rfc6979). */ +/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_10_0_nonce_function_rfc6979). */ /** Create an ECDSA signature. * * Returns: 1: signature created * 0: the nonce generation function failed, or the secret key was invalid. - * Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_2_context_static). + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_10_0_context_static). * Out: sig: pointer to an array where the signature will be placed. * In: msghash32: the 32-byte message hash being signed. * seckey: pointer to a 32-byte secret key. * noncefp: pointer to a nonce generation function. If NULL, - * rustsecp256k1_v0_9_2_nonce_function_default is used. + * rustsecp256k1_v0_10_0_nonce_function_default is used. * ndata: pointer to arbitrary data used by the nonce generation function * (can be NULL). If it is non-NULL and - * rustsecp256k1_v0_9_2_nonce_function_default is used, then ndata must be a + * rustsecp256k1_v0_10_0_nonce_function_default is used, then ndata must be a * pointer to 32-bytes of additional data. * * The created signature is always in lower-S form. See - * rustsecp256k1_v0_9_2_ecdsa_signature_normalize for more details. + * rustsecp256k1_v0_10_0_ecdsa_signature_normalize for more details.
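Following the lower-S discussion above, a sketch of accepting a compact signature from a system that may not enforce the lower-S rule (function name ours; `ctx`, `msg_hash`, and `pubkey` as in examples/ecdsa.c):

```c
static int verify_lenient(const rustsecp256k1_v0_10_0_context *ctx,
                          const unsigned char *compact64,
                          const unsigned char *msg_hash,
                          const rustsecp256k1_v0_10_0_pubkey *pubkey) {
    rustsecp256k1_v0_10_0_ecdsa_signature sig, lower_s;
    if (!rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact(ctx, &sig, compact64)) {
        return 0; /* not a parseable signature */
    }
    /* Returns 1 if sig was not already lower-S; lower_s is always usable. */
    rustsecp256k1_v0_10_0_ecdsa_signature_normalize(ctx, &lower_s, &sig);
    return rustsecp256k1_v0_10_0_ecdsa_verify(ctx, &lower_s, msg_hash, pubkey);
}
```

Note the trade-off spelled out in the header: normalizing before verification accepts malleable signatures, so this should be confined to inputs whose producers cannot be fixed.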
*/ -SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_sign( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_ecdsa_signature *sig, +SECP256K1_API int rustsecp256k1_v0_10_0_ecdsa_sign( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_ecdsa_signature *sig, const unsigned char *msghash32, const unsigned char *seckey, - rustsecp256k1_v0_9_2_nonce_function noncefp, + rustsecp256k1_v0_10_0_nonce_function noncefp, const void *ndata ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); @@ -650,8 +650,8 @@ SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_sign( * Args: ctx: pointer to a context object. * In: seckey: pointer to a 32-byte secret key. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_seckey_verify( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ec_seckey_verify( + const rustsecp256k1_v0_10_0_context *ctx, const unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); @@ -659,38 +659,38 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_seckey_ve * * Returns: 1: secret was valid, public key stored. * 0: secret was invalid, try again. - * Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_2_context_static). + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_10_0_context_static). * Out: pubkey: pointer to the created public key. * In: seckey: pointer to a 32-byte secret key. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_pubkey_create( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ec_pubkey_create( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_pubkey *pubkey, const unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Negates a secret key in place. * * Returns: 0 if the given secret key is invalid according to - * rustsecp256k1_v0_9_2_ec_seckey_verify. 1 otherwise + * rustsecp256k1_v0_10_0_ec_seckey_verify. 1 otherwise * Args: ctx: pointer to a context object * In/Out: seckey: pointer to the 32-byte secret key to be negated. If the * secret key is invalid according to - * rustsecp256k1_v0_9_2_ec_seckey_verify, this function returns 0 and + * rustsecp256k1_v0_10_0_ec_seckey_verify, this function returns 0 and * seckey will be set to some unspecified value. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_seckey_negate( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ec_seckey_negate( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); -/** Same as rustsecp256k1_v0_9_2_ec_seckey_negate, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_10_0_ec_seckey_negate, but DEPRECATED. Will be removed in * future versions.
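The negation functions above keep a key pair consistent: negating the secret key corresponds to negating the public key. A small consistency check in the style of the examples (helper name ours; `ctx` must not be the static context because `ec_pubkey_create` is used):

```c
#include <assert.h>
#include <string.h>

static void check_negation(const rustsecp256k1_v0_10_0_context *ctx,
                           const unsigned char *seckey,
                           const rustsecp256k1_v0_10_0_pubkey *pubkey) {
    unsigned char neg_seckey[32];
    rustsecp256k1_v0_10_0_pubkey neg_pubkey, recreated;
    int return_val;

    memcpy(neg_seckey, seckey, 32);
    return_val = rustsecp256k1_v0_10_0_ec_seckey_negate(ctx, neg_seckey);
    assert(return_val);

    neg_pubkey = *pubkey;
    return_val = rustsecp256k1_v0_10_0_ec_pubkey_negate(ctx, &neg_pubkey);
    assert(return_val);

    /* (-seckey)*G must equal -(seckey*G). */
    return_val = rustsecp256k1_v0_10_0_ec_pubkey_create(ctx, &recreated, neg_seckey);
    assert(return_val);
    assert(rustsecp256k1_v0_10_0_ec_pubkey_cmp(ctx, &neg_pubkey, &recreated) == 0);
}
```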
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_privkey_negate( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ec_privkey_negate( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) - SECP256K1_DEPRECATED("Use rustsecp256k1_v0_9_2_ec_seckey_negate instead"); + SECP256K1_DEPRECATED("Use rustsecp256k1_v0_10_0_ec_seckey_negate instead"); /** Negates a public key in place. * @@ -698,9 +698,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_privkey_n * Args: ctx: pointer to a context object * In/Out: pubkey: pointer to the public key to be negated. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_pubkey_negate( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_pubkey *pubkey +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ec_pubkey_negate( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); /** Tweak a secret key by adding tweak to it. @@ -710,28 +710,28 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_pubkey_ne * otherwise. * Args: ctx: pointer to a context object. * In/Out: seckey: pointer to a 32-byte secret key. If the secret key is - * invalid according to rustsecp256k1_v0_9_2_ec_seckey_verify, this + * invalid according to rustsecp256k1_v0_10_0_ec_seckey_verify, this * function returns 0. seckey will be set to some unspecified * value if this function returns 0. * In: tweak32: pointer to a 32-byte tweak, which must be valid according to - * rustsecp256k1_v0_9_2_ec_seckey_verify or 32 zero bytes. For uniformly + * rustsecp256k1_v0_10_0_ec_seckey_verify or 32 zero bytes. For uniformly * random 32-byte tweaks, the chance of being invalid is * negligible (around 1 in 2^128). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_seckey_tweak_add( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ec_seckey_tweak_add( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Same as rustsecp256k1_v0_9_2_ec_seckey_tweak_add, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_10_0_ec_seckey_tweak_add, but DEPRECATED. Will be removed in * future versions. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_privkey_tweak_add( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ec_privkey_tweak_add( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) - SECP256K1_DEPRECATED("Use rustsecp256k1_v0_9_2_ec_seckey_tweak_add instead"); + SECP256K1_DEPRECATED("Use rustsecp256k1_v0_10_0_ec_seckey_tweak_add instead"); /** Tweak a public key by adding tweak times the generator to it. * @@ -742,13 +742,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_privkey_t * In/Out: pubkey: pointer to a public key object. pubkey will be set to an * invalid value if this function returns 0. * In: tweak32: pointer to a 32-byte tweak, which must be valid according to - * rustsecp256k1_v0_9_2_ec_seckey_verify or 32 zero bytes. 
For uniformly + * rustsecp256k1_v0_10_0_ec_seckey_verify or 32 zero bytes. For uniformly * random 32-byte tweaks, the chance of being invalid is * negligible (around 1 in 2^128). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_pubkey_tweak_add( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ec_pubkey_tweak_add( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_pubkey *pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -757,28 +757,28 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_pubkey_tw * Returns: 0 if the arguments are invalid. 1 otherwise. * Args: ctx: pointer to a context object. * In/Out: seckey: pointer to a 32-byte secret key. If the secret key is - * invalid according to rustsecp256k1_v0_9_2_ec_seckey_verify, this + * invalid according to rustsecp256k1_v0_10_0_ec_seckey_verify, this * function returns 0. seckey will be set to some unspecified * value if this function returns 0. * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_9_2_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_10_0_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_seckey_tweak_mul( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ec_seckey_tweak_mul( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Same as rustsecp256k1_v0_9_2_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_10_0_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in * future versions. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_privkey_tweak_mul( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ec_privkey_tweak_mul( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) - SECP256K1_DEPRECATED("Use rustsecp256k1_v0_9_2_ec_seckey_tweak_mul instead"); + SECP256K1_DEPRECATED("Use rustsecp256k1_v0_10_0_ec_seckey_tweak_mul instead"); /** Tweak a public key by multiplying it by a tweak value. * @@ -787,13 +787,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_privkey_t * In/Out: pubkey: pointer to a public key object. pubkey will be set to an * invalid value if this function returns 0. * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_9_2_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_10_0_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128). 
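The additive tweak functions above are designed to stay in sync between the secret and public halves: tweaking a secret key and then deriving its public key gives the same result as tweaking the public key directly. A sketch (helper name ours; `tweak32` is any valid 32-byte tweak):

```c
#include <assert.h>
#include <string.h>

static void check_tweak_add(const rustsecp256k1_v0_10_0_context *ctx,
                            const unsigned char *seckey,
                            const rustsecp256k1_v0_10_0_pubkey *pubkey,
                            const unsigned char *tweak32) {
    unsigned char tweaked_seckey[32];
    rustsecp256k1_v0_10_0_pubkey tweaked_pubkey, expected;
    int return_val;

    memcpy(tweaked_seckey, seckey, 32);
    /* Either call may return 0 for an invalid tweak; real code must check. */
    return_val = rustsecp256k1_v0_10_0_ec_seckey_tweak_add(ctx, tweaked_seckey, tweak32);
    assert(return_val);
    tweaked_pubkey = *pubkey;
    return_val = rustsecp256k1_v0_10_0_ec_pubkey_tweak_add(ctx, &tweaked_pubkey, tweak32);
    assert(return_val);

    /* (seckey + tweak)*G must equal pubkey + tweak*G. */
    return_val = rustsecp256k1_v0_10_0_ec_pubkey_create(ctx, &expected, tweaked_seckey);
    assert(return_val);
    assert(rustsecp256k1_v0_10_0_ec_pubkey_cmp(ctx, &tweaked_pubkey, &expected) == 0);
}
```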
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_pubkey_tweak_mul( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ec_pubkey_tweak_mul( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_pubkey *pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -801,7 +801,7 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_pubkey_tw * * Returns: 1: randomization successful * 0: error - * Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_2_context_static). + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_10_0_context_static). * In: seed32: pointer to a 32-byte random seed (NULL resets to initial state). * * While secp256k1 code is written and tested to be constant-time no matter what @@ -812,25 +812,25 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_pubkey_tw * certain computations which involve secret keys. * * It is highly recommended to call this function on contexts returned from - * rustsecp256k1_v0_9_2_context_create or rustsecp256k1_v0_9_2_context_clone (or from the corresponding - * functions in rustsecp256k1_v0_9_2_preallocated.h) before using these contexts to call API + * rustsecp256k1_v0_10_0_context_create or rustsecp256k1_v0_10_0_context_clone (or from the corresponding + * functions in rustsecp256k1_v0_10_0_preallocated.h) before using these contexts to call API * functions that perform computations involving secret keys, e.g., signing and * public key generation. It is possible to call this function more than once on * the same context, and doing so before every few computations involving secret * keys is recommended as a defense-in-depth measure. Randomization of the static - * context rustsecp256k1_v0_9_2_context_static is not supported. + * context rustsecp256k1_v0_10_0_context_static is not supported. * * Currently, the random seed is mainly used for blinding multiplications of a * secret scalar with the elliptic curve base point. Multiplications of this * kind are performed by exactly those API functions which are documented to - * require a context that is not rustsecp256k1_v0_9_2_context_static. As a rule of thumb, + * require a context that is not rustsecp256k1_v0_10_0_context_static. As a rule of thumb, * these are all functions which take a secret key (or a keypair) as an input. * A notable exception to that rule is the ECDH module, which relies on a different * kind of elliptic curve point multiplication and thus does not benefit from * enhanced protection against side-channel leakage currently. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_context_randomize( - rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_context_randomize( + rustsecp256k1_v0_10_0_context *ctx, const unsigned char *seed32 ) SECP256K1_ARG_NONNULL(1); @@ -843,10 +843,10 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_context_rand * In: ins: pointer to array of pointers to public keys. * n: the number of public keys to add together (must be at least 1). 
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_pubkey_combine( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_pubkey *out, - const rustsecp256k1_v0_9_2_pubkey * const *ins, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ec_pubkey_combine( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_pubkey *out, + const rustsecp256k1_v0_10_0_pubkey * const *ins, size_t n ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -866,8 +866,8 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ec_pubkey_co * msg: pointer to an array containing the message * msglen: length of the message array */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_tagged_sha256( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_tagged_sha256( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *hash32, const unsigned char *tag, size_t taglen, diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h index cd08beb92..99a53cf56 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h @@ -10,15 +10,15 @@ extern "C" { /** A pointer to a function that hashes an EC point to obtain an ECDH secret * * Returns: 1 if the point was successfully hashed. - * 0 will cause rustsecp256k1_v0_9_2_ecdh to fail and return 0. + * 0 will cause rustsecp256k1_v0_10_0_ecdh to fail and return 0. * Other return values are not allowed, and the behaviour of - * rustsecp256k1_v0_9_2_ecdh is undefined for other return values. + * rustsecp256k1_v0_10_0_ecdh is undefined for other return values. * Out: output: pointer to an array to be filled by the function * In: x32: pointer to a 32-byte x coordinate * y32: pointer to a 32-byte y coordinate * data: arbitrary data pointer that is passed through */ -typedef int (*rustsecp256k1_v0_9_2_ecdh_hash_function)( +typedef int (*rustsecp256k1_v0_10_0_ecdh_hash_function)( unsigned char *output, const unsigned char *x32, const unsigned char *y32, @@ -27,11 +27,11 @@ typedef int (*rustsecp256k1_v0_9_2_ecdh_hash_function)( /** An implementation of the SHA256 hash function applied to the compressed public key. * Populates the output parameter with 32 bytes. */ -SECP256K1_API const rustsecp256k1_v0_9_2_ecdh_hash_function rustsecp256k1_v0_9_2_ecdh_hash_function_sha256; +SECP256K1_API const rustsecp256k1_v0_10_0_ecdh_hash_function rustsecp256k1_v0_10_0_ecdh_hash_function_sha256; -/** A default ECDH hash function (currently equal to rustsecp256k1_v0_9_2_ecdh_hash_function_sha256). +/** A default ECDH hash function (currently equal to rustsecp256k1_v0_10_0_ecdh_hash_function_sha256). * Populates the output parameter with 32 bytes. */ -SECP256K1_API const rustsecp256k1_v0_9_2_ecdh_hash_function rustsecp256k1_v0_9_2_ecdh_hash_function_default; +SECP256K1_API const rustsecp256k1_v0_10_0_ecdh_hash_function rustsecp256k1_v0_10_0_ecdh_hash_function_default; /** Compute an EC Diffie-Hellman secret in constant time * @@ -39,20 +39,20 @@ SECP256K1_API const rustsecp256k1_v0_9_2_ecdh_hash_function rustsecp256k1_v0_9_2 * 0: scalar was invalid (zero or overflow) or hashfp returned 0 * Args: ctx: pointer to a context object. * Out: output: pointer to an array to be filled by hashfp. - In: pubkey: a pointer to a rustsecp256k1_v0_9_2_pubkey containing an initialized public key.
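The combine function documented above sums any number of public keys; a two-key sketch (helper name ours):

```c
/* out = p1 + p2. Returns 0 when the sum is not a valid public key,
 * e.g. the point at infinity when p2 is the negation of p1. */
static int sum_two_pubkeys(const rustsecp256k1_v0_10_0_context *ctx,
                           rustsecp256k1_v0_10_0_pubkey *out,
                           const rustsecp256k1_v0_10_0_pubkey *p1,
                           const rustsecp256k1_v0_10_0_pubkey *p2) {
    const rustsecp256k1_v0_10_0_pubkey *ins[2];
    ins[0] = p1;
    ins[1] = p2;
    return rustsecp256k1_v0_10_0_ec_pubkey_combine(ctx, out, ins, 2);
}
```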
+ * In: pubkey: a pointer to a rustsecp256k1_v0_10_0_pubkey containing an initialized public key. * seckey: a 32-byte scalar with which to multiply the point. * hashfp: pointer to a hash function. If NULL, - * rustsecp256k1_v0_9_2_ecdh_hash_function_sha256 is used + * rustsecp256k1_v0_10_0_ecdh_hash_function_sha256 is used * (in which case, 32 bytes will be written to output). * data: arbitrary data pointer that is passed through to hashfp - * (can be NULL for rustsecp256k1_v0_9_2_ecdh_hash_function_sha256). + * (can be NULL for rustsecp256k1_v0_10_0_ecdh_hash_function_sha256). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ecdh( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ecdh( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *output, - const rustsecp256k1_v0_9_2_pubkey *pubkey, + const rustsecp256k1_v0_10_0_pubkey *pubkey, const unsigned char *seckey, - rustsecp256k1_v0_9_2_ecdh_hash_function hashfp, + rustsecp256k1_v0_10_0_ecdh_hash_function hashfp, void *data ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_ellswift.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_ellswift.h index 2812b1f15..5d4b9ac7b 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_ellswift.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_ellswift.h @@ -45,13 +45,13 @@ extern "C" { * For mathematical background about the scheme, see the doc/ellswift.md file. */ -/** A pointer to a function used by rustsecp256k1_v0_9_2_ellswift_xdh to hash the shared X +/** A pointer to a function used by rustsecp256k1_v0_10_0_ellswift_xdh to hash the shared X * coordinate along with the encoded public keys to a uniform shared secret. * * Returns: 1 if a shared secret was successfully computed. - * 0 will cause rustsecp256k1_v0_9_2_ellswift_xdh to fail and return 0. + * 0 will cause rustsecp256k1_v0_10_0_ellswift_xdh to fail and return 0. * Other return values are not allowed, and the behaviour of - * rustsecp256k1_v0_9_2_ellswift_xdh is undefined for other return values. + * rustsecp256k1_v0_10_0_ellswift_xdh is undefined for other return values. * Out: output: pointer to an array to be filled by the function * In: x32: pointer to the 32-byte serialized X coordinate * of the resulting shared point (will not be NULL) * (will not be NULL) * data: arbitrary data pointer that is passed through */ -typedef int (*rustsecp256k1_v0_9_2_ellswift_xdh_hash_function)( +typedef int (*rustsecp256k1_v0_10_0_ellswift_xdh_hash_function)( unsigned char *output, const unsigned char *x32, const unsigned char *ell_a64, @@ -69,25 +69,25 @@ typedef int (*rustsecp256k1_v0_9_2_ellswift_xdh_hash_function)( void *data ); -/** An implementation of an rustsecp256k1_v0_9_2_ellswift_xdh_hash_function which uses +/** An implementation of a rustsecp256k1_v0_10_0_ellswift_xdh_hash_function which uses * SHA256(prefix64 || ell_a64 || ell_b64 || x32), where prefix64 is the 64-byte * array pointed to by data.
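The ECDH hash callback typedef in secp256k1_ecdh.h above can be replaced by a custom function with the documented `(output, x32, y32, data)` shape. A sketch that keys off only the X coordinate (function name ours; deliberately not a KDF):

```c
#include <string.h>

/* Copies the raw X coordinate instead of hashing it. Only to illustrate
 * the callback shape: real code should hash the coordinate (or simply
 * pass hashfp = NULL for the SHA256 default). Writes 32 bytes, so the
 * output buffer handed to rustsecp256k1_v0_10_0_ecdh must hold 32 bytes. */
static int xonly_passthrough(unsigned char *output, const unsigned char *x32,
                             const unsigned char *y32, void *data) {
    (void)y32;
    (void)data;
    memcpy(output, x32, 32);
    return 1; /* returning 0 would make rustsecp256k1_v0_10_0_ecdh fail */
}
```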
*/ -SECP256K1_API const rustsecp256k1_v0_9_2_ellswift_xdh_hash_function rustsecp256k1_v0_9_2_ellswift_xdh_hash_function_prefix; +SECP256K1_API const rustsecp256k1_v0_10_0_ellswift_xdh_hash_function rustsecp256k1_v0_10_0_ellswift_xdh_hash_function_prefix; -/** An implementation of an rustsecp256k1_v0_9_2_ellswift_xdh_hash_function compatible with +/** An implementation of an rustsecp256k1_v0_10_0_ellswift_xdh_hash_function compatible with * BIP324. It returns H_tag(ell_a64 || ell_b64 || x32), where H_tag is the * BIP340 tagged hash function with tag "bip324_ellswift_xonly_ecdh". Equivalent - * to rustsecp256k1_v0_9_2_ellswift_xdh_hash_function_prefix with prefix64 set to + * to rustsecp256k1_v0_10_0_ellswift_xdh_hash_function_prefix with prefix64 set to * SHA256("bip324_ellswift_xonly_ecdh")||SHA256("bip324_ellswift_xonly_ecdh"). * The data argument is ignored. */ -SECP256K1_API const rustsecp256k1_v0_9_2_ellswift_xdh_hash_function rustsecp256k1_v0_9_2_ellswift_xdh_hash_function_bip324; +SECP256K1_API const rustsecp256k1_v0_10_0_ellswift_xdh_hash_function rustsecp256k1_v0_10_0_ellswift_xdh_hash_function_bip324; /** Construct a 64-byte ElligatorSwift encoding of a given pubkey. * * Returns: 1 always. * Args: ctx: pointer to a context object * Out: ell64: pointer to a 64-byte array to be filled - * In: pubkey: a pointer to a rustsecp256k1_v0_9_2_pubkey containing an + * In: pubkey: a pointer to a rustsecp256k1_v0_10_0_pubkey containing an * initialized public key * rnd32: pointer to 32 bytes of randomness * @@ -104,10 +104,10 @@ SECP256K1_API const rustsecp256k1_v0_9_2_ellswift_xdh_hash_function rustsecp256k * * This function runs in variable time. */ -SECP256K1_API int rustsecp256k1_v0_9_2_ellswift_encode( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API int rustsecp256k1_v0_10_0_ellswift_encode( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *ell64, - const rustsecp256k1_v0_9_2_pubkey *pubkey, + const rustsecp256k1_v0_10_0_pubkey *pubkey, const unsigned char *rnd32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); @@ -115,14 +115,14 @@ SECP256K1_API int rustsecp256k1_v0_9_2_ellswift_encode( * * Returns: always 1 * Args: ctx: pointer to a context object - * Out: pubkey: pointer to a rustsecp256k1_v0_9_2_pubkey that will be filled + * Out: pubkey: pointer to a rustsecp256k1_v0_10_0_pubkey that will be filled * In: ell64: pointer to a 64-byte array to decode * * This function runs in variable time. */ -SECP256K1_API int rustsecp256k1_v0_9_2_ellswift_decode( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_pubkey *pubkey, +SECP256K1_API int rustsecp256k1_v0_10_0_ellswift_decode( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_pubkey *pubkey, const unsigned char *ell64 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -141,18 +141,18 @@ SECP256K1_API int rustsecp256k1_v0_9_2_ellswift_decode( * It is recommended that auxrnd32 contains 32 uniformly random bytes, though * it is optional (and does result in encodings that are indistinguishable from * uniform even without any auxrnd32). It differs from the (mandatory) rnd32 - * argument to rustsecp256k1_v0_9_2_ellswift_encode in this regard. + * argument to rustsecp256k1_v0_10_0_ellswift_encode in this regard. * - * This function can be used instead of calling rustsecp256k1_v0_9_2_ec_pubkey_create - * followed by rustsecp256k1_v0_9_2_ellswift_encode. 
It is safer, as it uses the secret + * This function can be used instead of calling rustsecp256k1_v0_10_0_ec_pubkey_create + * followed by rustsecp256k1_v0_10_0_ellswift_encode. It is safer, as it uses the secret * key as entropy for the encoding (supplemented with auxrnd32, if provided). * - * Like rustsecp256k1_v0_9_2_ellswift_encode, this function does not guarantee that the + * Like rustsecp256k1_v0_10_0_ellswift_encode, this function does not guarantee that the * computed encoding is stable across versions of the library, even if all * arguments (including auxrnd32) are the same. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ellswift_create( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ellswift_create( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *ell64, const unsigned char *seckey32, const unsigned char *auxrnd32 @@ -182,14 +182,14 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ellswift_cre * This function is more efficient than decoding the public keys, and performing * ECDH on them. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ellswift_xdh( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ellswift_xdh( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *output, const unsigned char *ell_a64, const unsigned char *ell_b64, const unsigned char *seckey32, int party, - rustsecp256k1_v0_9_2_ellswift_xdh_hash_function hashfp, + rustsecp256k1_v0_10_0_ellswift_xdh_hash_function hashfp, void *data ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(7); diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_extrakeys.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_extrakeys.h index 39c81b519..033fbb6f6 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_extrakeys.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_extrakeys.h @@ -16,12 +16,12 @@ extern "C" { * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. * If you need to convert to a format suitable for storage, transmission, use - * use rustsecp256k1_v0_9_2_xonly_pubkey_serialize and rustsecp256k1_v0_9_2_xonly_pubkey_parse. To - * compare keys, use rustsecp256k1_v0_9_2_xonly_pubkey_cmp. + * use rustsecp256k1_v0_10_0_xonly_pubkey_serialize and rustsecp256k1_v0_10_0_xonly_pubkey_parse. To + * compare keys, use rustsecp256k1_v0_10_0_xonly_pubkey_cmp. */ typedef struct { unsigned char data[64]; -} rustsecp256k1_v0_9_2_xonly_pubkey; +} rustsecp256k1_v0_10_0_xonly_pubkey; /** Opaque data structure that holds a keypair consisting of a secret and a * public key. @@ -32,7 +32,7 @@ typedef struct { */ typedef struct { unsigned char data[96]; -} rustsecp256k1_v0_9_2_keypair; +} rustsecp256k1_v0_10_0_keypair; /** Parse a 32-byte sequence into a xonly_pubkey object. * @@ -44,9 +44,9 @@ typedef struct { * parsed version of input. If not, it's set to an invalid value. * In: input32: pointer to a serialized xonly_pubkey. 
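Putting the ElligatorSwift declarations above together, a hypothetical two-party sketch (the wrapper names `make_encoding` and `a_shared_secret` are illustrative; party 0 is assumed to be the side that produced ell_a64, and error handling is elided):

```
/* Hypothetical sketch, not part of the vendored sources. */
int make_encoding(const rustsecp256k1_v0_10_0_context *ctx,
                  unsigned char ell64[64],
                  const unsigned char seckey32[32],
                  const unsigned char auxrnd32[32]) {
    /* Derives the public key from seckey32 and encodes it in one step. */
    return rustsecp256k1_v0_10_0_ellswift_create(ctx, ell64, seckey32, auxrnd32);
}

int a_shared_secret(const rustsecp256k1_v0_10_0_context *ctx,
                    unsigned char shared32[32],
                    const unsigned char ell_a64[64],
                    const unsigned char ell_b64[64],
                    const unsigned char a_seckey32[32]) {
    /* party = 0: the caller is assumed to own ell_a64; the BIP324 tagged
     * hash of the shared X coordinate and both encodings is returned. */
    return rustsecp256k1_v0_10_0_ellswift_xdh(ctx, shared32, ell_a64, ell_b64,
                                              a_seckey32, 0,
                                              rustsecp256k1_v0_10_0_ellswift_xdh_hash_function_bip324,
                                              NULL);
}
```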
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_xonly_pubkey_parse( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_xonly_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_xonly_pubkey_parse( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_xonly_pubkey *pubkey, const unsigned char *input32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -56,12 +56,12 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_xonly_pubkey * * Args: ctx: a secp256k1 context object. * Out: output32: a pointer to a 32-byte array to place the serialized key in. - * In: pubkey: a pointer to a rustsecp256k1_v0_9_2_xonly_pubkey containing an initialized public key. + * In: pubkey: a pointer to a rustsecp256k1_v0_10_0_xonly_pubkey containing an initialized public key. */ -SECP256K1_API int rustsecp256k1_v0_9_2_xonly_pubkey_serialize( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API int rustsecp256k1_v0_10_0_xonly_pubkey_serialize( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *output32, - const rustsecp256k1_v0_9_2_xonly_pubkey *pubkey + const rustsecp256k1_v0_10_0_xonly_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Compare two x-only public keys using lexicographic order @@ -73,13 +73,13 @@ SECP256K1_API int rustsecp256k1_v0_9_2_xonly_pubkey_serialize( * In: pubkey1: first public key to compare * pubkey2: second public key to compare */ -SECP256K1_API int rustsecp256k1_v0_9_2_xonly_pubkey_cmp( - const rustsecp256k1_v0_9_2_context *ctx, - const rustsecp256k1_v0_9_2_xonly_pubkey *pk1, - const rustsecp256k1_v0_9_2_xonly_pubkey *pk2 +SECP256K1_API int rustsecp256k1_v0_10_0_xonly_pubkey_cmp( + const rustsecp256k1_v0_10_0_context *ctx, + const rustsecp256k1_v0_10_0_xonly_pubkey *pk1, + const rustsecp256k1_v0_10_0_xonly_pubkey *pk2 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Converts a rustsecp256k1_v0_9_2_pubkey into a rustsecp256k1_v0_9_2_xonly_pubkey. +/** Converts a rustsecp256k1_v0_10_0_pubkey into a rustsecp256k1_v0_10_0_xonly_pubkey. * * Returns: 1 always. * @@ -90,11 +90,11 @@ SECP256K1_API int rustsecp256k1_v0_9_2_xonly_pubkey_cmp( * the negation of the pubkey and set to 0 otherwise. * In: pubkey: pointer to a public key that is converted. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_xonly_pubkey *xonly_pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_xonly_pubkey *xonly_pubkey, int *pk_parity, - const rustsecp256k1_v0_9_2_pubkey *pubkey + const rustsecp256k1_v0_10_0_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4); /** Tweak an x-only public key by adding the generator multiplied with tweak32 @@ -102,7 +102,7 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_xonly_pubkey * * Note that the resulting point can not in general be represented by an x-only * pubkey because it may have an odd Y coordinate. Instead, the output_pubkey - * is a normal rustsecp256k1_v0_9_2_pubkey. + * is a normal rustsecp256k1_v0_10_0_pubkey. 
* * Returns: 0 if the arguments are invalid or the resulting public key would be * invalid (only when the tweak is the negation of the corresponding @@ -113,23 +113,23 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_xonly_pubkey * to an invalid value if this function returns 0. * In: internal_pubkey: pointer to an x-only pubkey to apply the tweak to. * tweak32: pointer to a 32-byte tweak, which must be valid - * according to rustsecp256k1_v0_9_2_ec_seckey_verify or 32 zero + * according to rustsecp256k1_v0_10_0_ec_seckey_verify or 32 zero * bytes. For uniformly random 32-byte tweaks, the chance of * being invalid is negligible (around 1 in 2^128). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_pubkey *output_pubkey, - const rustsecp256k1_v0_9_2_xonly_pubkey *internal_pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_pubkey *output_pubkey, + const rustsecp256k1_v0_10_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Checks that a tweaked pubkey is the result of calling - * rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add with internal_pubkey and tweak32. + * rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add with internal_pubkey and tweak32. * * The tweaked pubkey is represented by its 32-byte x-only serialization and * its pk_parity, which can both be obtained by converting the result of - * tweak_add to a rustsecp256k1_v0_9_2_xonly_pubkey. + * tweak_add to a rustsecp256k1_v0_10_0_xonly_pubkey. * * Note that this alone does _not_ verify that the tweaked pubkey is a * commitment. If the tweak is not chosen in a specific way, the tweaked pubkey @@ -142,16 +142,16 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_xonly_pubkey * tweaked_pk_parity: the parity of the tweaked pubkey (whose serialization * is passed in as tweaked_pubkey32). This must match the * pk_parity value that is returned when calling - * rustsecp256k1_v0_9_2_xonly_pubkey with the tweaked pubkey, or + * rustsecp256k1_v0_10_0_xonly_pubkey with the tweaked pubkey, or * this function will fail. * internal_pubkey: pointer to an x-only public key object to apply the tweak to. * tweak32: pointer to a 32-byte tweak. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check( + const rustsecp256k1_v0_10_0_context *ctx, const unsigned char *tweaked_pubkey32, int tweaked_pk_parity, - const rustsecp256k1_v0_9_2_xonly_pubkey *internal_pubkey, + const rustsecp256k1_v0_10_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5); @@ -159,13 +159,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_xonly_pubkey * * Returns: 1: secret was valid, keypair is ready to use * 0: secret was invalid, try again with a different secret - * Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_2_context_static). + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_10_0_context_static). * Out: keypair: pointer to the created keypair. 
* In: seckey: pointer to a 32-byte secret key. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_keypair_create( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_keypair *keypair, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_keypair_create( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_keypair *keypair, const unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -176,10 +176,10 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_keypair_crea * Out: seckey: pointer to a 32-byte buffer for the secret key. * In: keypair: pointer to a keypair. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_keypair_sec( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_keypair_sec( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *seckey, - const rustsecp256k1_v0_9_2_keypair *keypair + const rustsecp256k1_v0_10_0_keypair *keypair ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Get the public key from a keypair. @@ -189,38 +189,38 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_keypair_sec( * Out: pubkey: pointer to a pubkey object, set to the keypair public key. * In: keypair: pointer to a keypair. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_keypair_pub( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_pubkey *pubkey, - const rustsecp256k1_v0_9_2_keypair *keypair +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_keypair_pub( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_pubkey *pubkey, + const rustsecp256k1_v0_10_0_keypair *keypair ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Get the x-only public key from a keypair. * - * This is the same as calling rustsecp256k1_v0_9_2_keypair_pub and then - * rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey. + * This is the same as calling rustsecp256k1_v0_10_0_keypair_pub and then + * rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey. * * Returns: 1 always. * Args: ctx: pointer to a context object. * Out: pubkey: pointer to an xonly_pubkey object, set to the keypair * public key after converting it to an xonly_pubkey. * pk_parity: Ignored if NULL. Otherwise, pointer to an integer that will be set to the - * pk_parity argument of rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey. + * pk_parity argument of rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey. * In: keypair: pointer to a keypair. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_keypair_xonly_pub( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_xonly_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_keypair_xonly_pub( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_xonly_pubkey *pubkey, int *pk_parity, - const rustsecp256k1_v0_9_2_keypair *keypair + const rustsecp256k1_v0_10_0_keypair *keypair ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4); /** Tweak a keypair by adding tweak32 to the secret key and updating the public * key accordingly. * - * Calling this function and then rustsecp256k1_v0_9_2_keypair_pub results in the same - * public key as calling rustsecp256k1_v0_9_2_keypair_xonly_pub and then - * rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add. 
+ * Calling this function and then rustsecp256k1_v0_10_0_keypair_pub results in the same + * public key as calling rustsecp256k1_v0_10_0_keypair_xonly_pub and then + * rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add. * * Returns: 0 if the arguments are invalid or the resulting keypair would be * invalid (only when the tweak is the negation of the keypair's @@ -230,13 +230,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_keypair_xonl * In/Out: keypair: pointer to a keypair to apply the tweak to. Will be set to * an invalid value if this function returns 0. * In: tweak32: pointer to a 32-byte tweak, which must be valid according to - * rustsecp256k1_v0_9_2_ec_seckey_verify or 32 zero bytes. For uniformly + * rustsecp256k1_v0_10_0_ec_seckey_verify or 32 zero bytes. For uniformly * random 32-byte tweaks, the chance of being invalid is * negligible (around 1 in 2^128). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_keypair_xonly_tweak_add( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_keypair *keypair, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_keypair_xonly_tweak_add( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_keypair *keypair, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_preallocated.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_preallocated.h index ada165414..46bc0da82 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_preallocated.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_preallocated.h @@ -16,8 +16,8 @@ extern "C" { * objects created by functions in secp256k1.h, i.e., they can be passed to any * API function that expects a context object (see secp256k1.h for details). The * only exception is that context objects created by functions in this module - * must be destroyed using rustsecp256k1_v0_9_2_context_preallocated_destroy (in this - * module) instead of rustsecp256k1_v0_9_2_context_destroy (in secp256k1.h). + * must be destroyed using rustsecp256k1_v0_10_0_context_preallocated_destroy (in this + * module) instead of rustsecp256k1_v0_10_0_context_destroy (in secp256k1.h). * * It is guaranteed that functions in this module will not call malloc or its * friends realloc, calloc, and free. @@ -27,24 +27,24 @@ extern "C" { * caller-provided memory. * * The purpose of this function is to determine how much memory must be provided - * to rustsecp256k1_v0_9_2_context_preallocated_create. + * to rustsecp256k1_v0_10_0_context_preallocated_create. * * Returns: the required size of the caller-provided memory block * In: flags: which parts of the context to initialize. */ -SECP256K1_API size_t rustsecp256k1_v0_9_2_context_preallocated_size( +SECP256K1_API size_t rustsecp256k1_v0_10_0_context_preallocated_size( unsigned int flags ) SECP256K1_WARN_UNUSED_RESULT; /** Create a secp256k1 context object in caller-provided memory. * * The caller must provide a pointer to a rewritable contiguous block of memory - * of size at least rustsecp256k1_v0_9_2_context_preallocated_size(flags) bytes, suitably + * of size at least rustsecp256k1_v0_10_0_context_preallocated_size(flags) bytes, suitably * aligned to hold an object of any type. 
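Tying the extrakeys declarations above together, a hypothetical tweak flow (the name `tweak_demo` is illustrative; a non-static context, a valid secret key, and a tweak valid per rustsecp256k1_v0_10_0_ec_seckey_verify are assumed):

```
/* Hypothetical sketch, not part of the vendored sources. */
int tweak_demo(const rustsecp256k1_v0_10_0_context *ctx,
               const unsigned char seckey[32],
               const unsigned char tweak32[32]) {
    rustsecp256k1_v0_10_0_keypair kp;
    rustsecp256k1_v0_10_0_xonly_pubkey internal;
    rustsecp256k1_v0_10_0_pubkey tweaked;
    int parity;
    if (!rustsecp256k1_v0_10_0_keypair_create(ctx, &kp, seckey)) return 0;
    if (!rustsecp256k1_v0_10_0_keypair_xonly_pub(ctx, &internal, &parity, &kp)) return 0;
    /* The result is a normal pubkey: the tweaked point may have an odd Y. */
    return rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(ctx, &tweaked, &internal, tweak32);
}
```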
* * The block of memory is exclusively owned by the created context object during * the lifetime of this context object, which begins with the call to this - * function and ends when a call to rustsecp256k1_v0_9_2_context_preallocated_destroy + * function and ends when a call to rustsecp256k1_v0_10_0_context_preallocated_destroy * (which destroys the context object again) returns. During the lifetime of the * context object, the caller is obligated not to access this block of memory, * i.e., the caller may not read or write the memory, e.g., by copying the memory @@ -54,16 +54,16 @@ SECP256K1_API size_t rustsecp256k1_v0_9_2_context_preallocated_size( * * Returns: a newly created context object. * In: prealloc: a pointer to a rewritable contiguous block of memory of - * size at least rustsecp256k1_v0_9_2_context_preallocated_size(flags) + * size at least rustsecp256k1_v0_10_0_context_preallocated_size(flags) * bytes, as detailed above. * flags: which parts of the context to initialize. * - * See rustsecp256k1_v0_9_2_context_create (in secp256k1.h) for further details. + * See rustsecp256k1_v0_10_0_context_create (in secp256k1.h) for further details. * - * See also rustsecp256k1_v0_9_2_context_randomize (in secp256k1.h) - * and rustsecp256k1_v0_9_2_context_preallocated_destroy. + * See also rustsecp256k1_v0_10_0_context_randomize (in secp256k1.h) + * and rustsecp256k1_v0_10_0_context_preallocated_destroy. */ -SECP256K1_API rustsecp256k1_v0_9_2_context *rustsecp256k1_v0_9_2_context_preallocated_create( +SECP256K1_API rustsecp256k1_v0_10_0_context *rustsecp256k1_v0_10_0_context_preallocated_create( void *prealloc, unsigned int flags ) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT; @@ -74,31 +74,31 @@ SECP256K1_API rustsecp256k1_v0_9_2_context *rustsecp256k1_v0_9_2_context_preallo * Returns: the required size of the caller-provided memory block. * In: ctx: an existing context to copy. */ -SECP256K1_API size_t rustsecp256k1_v0_9_2_context_preallocated_clone_size( - const rustsecp256k1_v0_9_2_context *ctx +SECP256K1_API size_t rustsecp256k1_v0_10_0_context_preallocated_clone_size( + const rustsecp256k1_v0_10_0_context *ctx ) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT; /** Copy a secp256k1 context object into caller-provided memory. * * The caller must provide a pointer to a rewritable contiguous block of memory - * of size at least rustsecp256k1_v0_9_2_context_preallocated_size(flags) bytes, suitably + * of size at least rustsecp256k1_v0_10_0_context_preallocated_size(flags) bytes, suitably * aligned to hold an object of any type. * * The block of memory is exclusively owned by the created context object during * the lifetime of this context object, see the description of - * rustsecp256k1_v0_9_2_context_preallocated_create for details. + * rustsecp256k1_v0_10_0_context_preallocated_create for details. * - * Cloning rustsecp256k1_v0_9_2_context_static is not possible, and should not be emulated by + * Cloning rustsecp256k1_v0_10_0_context_static is not possible, and should not be emulated by * the caller (e.g., using memcpy). Create a new context instead. * * Returns: a newly created context object. - * Args: ctx: an existing context to copy (not rustsecp256k1_v0_9_2_context_static). + * Args: ctx: an existing context to copy (not rustsecp256k1_v0_10_0_context_static). 
* In: prealloc: a pointer to a rewritable contiguous block of memory of - * size at least rustsecp256k1_v0_9_2_context_preallocated_size(flags) + * size at least rustsecp256k1_v0_10_0_context_preallocated_size(flags) * bytes, as detailed above. */ -SECP256K1_API rustsecp256k1_v0_9_2_context *rustsecp256k1_v0_9_2_context_preallocated_clone( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API rustsecp256k1_v0_10_0_context *rustsecp256k1_v0_10_0_context_preallocated_clone( + const rustsecp256k1_v0_10_0_context *ctx, void *prealloc ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_WARN_UNUSED_RESULT; @@ -108,23 +108,23 @@ SECP256K1_API rustsecp256k1_v0_9_2_context *rustsecp256k1_v0_9_2_context_preallo * The context pointer may not be used afterwards. * * The context to destroy must have been created using - * rustsecp256k1_v0_9_2_context_preallocated_create or rustsecp256k1_v0_9_2_context_preallocated_clone. - * If the context has instead been created using rustsecp256k1_v0_9_2_context_create or - * rustsecp256k1_v0_9_2_context_clone, the behaviour is undefined. In that case, - * rustsecp256k1_v0_9_2_context_destroy must be used instead. + * rustsecp256k1_v0_10_0_context_preallocated_create or rustsecp256k1_v0_10_0_context_preallocated_clone. + * If the context has instead been created using rustsecp256k1_v0_10_0_context_create or + * rustsecp256k1_v0_10_0_context_clone, the behaviour is undefined. In that case, + * rustsecp256k1_v0_10_0_context_destroy must be used instead. * * If required, it is the responsibility of the caller to deallocate the block * of memory properly after this function returns, e.g., by calling free on the - * preallocated pointer given to rustsecp256k1_v0_9_2_context_preallocated_create or - * rustsecp256k1_v0_9_2_context_preallocated_clone. + * preallocated pointer given to rustsecp256k1_v0_10_0_context_preallocated_create or + * rustsecp256k1_v0_10_0_context_preallocated_clone. * * Args: ctx: an existing context to destroy, constructed using - * rustsecp256k1_v0_9_2_context_preallocated_create or - * rustsecp256k1_v0_9_2_context_preallocated_clone - * (i.e., not rustsecp256k1_v0_9_2_context_static). + * rustsecp256k1_v0_10_0_context_preallocated_create or + * rustsecp256k1_v0_10_0_context_preallocated_clone + * (i.e., not rustsecp256k1_v0_10_0_context_static). */ -SECP256K1_API void rustsecp256k1_v0_9_2_context_preallocated_destroy( - rustsecp256k1_v0_9_2_context *ctx +SECP256K1_API void rustsecp256k1_v0_10_0_context_preallocated_destroy( + rustsecp256k1_v0_10_0_context *ctx ) SECP256K1_ARG_NONNULL(1); #ifdef __cplusplus diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h index 822439a61..64acee9b4 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h @@ -14,8 +14,8 @@ extern "C" { * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 65 bytes in size, and can be safely copied/moved. * If you need to convert to a format suitable for storage or transmission, use - * the rustsecp256k1_v0_9_2_ecdsa_signature_serialize_* and - * rustsecp256k1_v0_9_2_ecdsa_signature_parse_* functions. + * the rustsecp256k1_v0_10_0_ecdsa_signature_serialize_* and + * rustsecp256k1_v0_10_0_ecdsa_signature_parse_* functions. 
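The preallocated-context declarations above pair create with destroy around caller-owned memory; a minimal lifecycle sketch (hypothetical, not from the patch; malloc is assumed to provide alignment suitable for any object type, which it does):

```
#include <stdlib.h>

/* Hypothetical sketch, not part of the vendored sources. */
int preallocated_demo(void) {
    size_t sz = rustsecp256k1_v0_10_0_context_preallocated_size(SECP256K1_CONTEXT_NONE);
    void *buf = malloc(sz);
    rustsecp256k1_v0_10_0_context *ctx;
    if (buf == NULL) return 0;
    ctx = rustsecp256k1_v0_10_0_context_preallocated_create(buf, SECP256K1_CONTEXT_NONE);
    /* ... use ctx; buf must not be read or written while ctx is alive ... */
    rustsecp256k1_v0_10_0_context_preallocated_destroy(ctx);
    free(buf); /* only after destroy has returned */
    return 1;
}
```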
* * Furthermore, it is guaranteed that identical signatures (including their * recoverability) will have identical representation, so they can be @@ -23,7 +23,7 @@ extern "C" { */ typedef struct { unsigned char data[65]; -} rustsecp256k1_v0_9_2_ecdsa_recoverable_signature; +} rustsecp256k1_v0_10_0_ecdsa_recoverable_signature; /** Parse a compact ECDSA signature (64 bytes + recovery id). * @@ -33,9 +33,9 @@ typedef struct { * In: input64: a pointer to a 64-byte compact signature * recid: the recovery id (0, 1, 2 or 3) */ -SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature *sig, +SECP256K1_API int rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_ecdsa_recoverable_signature *sig, const unsigned char *input64, int recid ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -47,10 +47,10 @@ SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact * Out: sig: a pointer to a normal signature. * In: sigin: a pointer to a recoverable signature. */ -SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_convert( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_ecdsa_signature *sig, - const rustsecp256k1_v0_9_2_ecdsa_recoverable_signature *sigin +SECP256K1_API int rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_convert( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_ecdsa_signature *sig, + const rustsecp256k1_v0_10_0_ecdsa_recoverable_signature *sigin ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Serialize an ECDSA signature in compact format (64 bytes + recovery id). @@ -61,32 +61,32 @@ SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_convert( * recid: a pointer to an integer to hold the recovery id. * In: sig: a pointer to an initialized signature object. */ -SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_serialize_compact( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API int rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_serialize_compact( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *output64, int *recid, - const rustsecp256k1_v0_9_2_ecdsa_recoverable_signature *sig + const rustsecp256k1_v0_10_0_ecdsa_recoverable_signature *sig ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Create a recoverable ECDSA signature. * * Returns: 1: signature created * 0: the nonce generation function failed, or the secret key was invalid. - * Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_2_context_static). + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_10_0_context_static). * Out: sig: pointer to an array where the signature will be placed. * In: msghash32: the 32-byte message hash being signed. * seckey: pointer to a 32-byte secret key. * noncefp: pointer to a nonce generation function. If NULL, - * rustsecp256k1_v0_9_2_nonce_function_default is used. + * rustsecp256k1_v0_10_0_nonce_function_default is used. * ndata: pointer to arbitrary data used by the nonce generation function - * (can be NULL for rustsecp256k1_v0_9_2_nonce_function_default). + * (can be NULL for rustsecp256k1_v0_10_0_nonce_function_default). 
*/ -SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_sign_recoverable( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature *sig, +SECP256K1_API int rustsecp256k1_v0_10_0_ecdsa_sign_recoverable( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_ecdsa_recoverable_signature *sig, const unsigned char *msghash32, const unsigned char *seckey, - rustsecp256k1_v0_9_2_nonce_function noncefp, + rustsecp256k1_v0_10_0_nonce_function noncefp, const void *ndata ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); @@ -99,10 +99,10 @@ SECP256K1_API int rustsecp256k1_v0_9_2_ecdsa_sign_recoverable( * In: sig: pointer to initialized signature that supports pubkey recovery. * msghash32: the 32-byte message hash assumed to be signed. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_ecdsa_recover( - const rustsecp256k1_v0_9_2_context *ctx, - rustsecp256k1_v0_9_2_pubkey *pubkey, - const rustsecp256k1_v0_9_2_ecdsa_recoverable_signature *sig, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_ecdsa_recover( + const rustsecp256k1_v0_10_0_context *ctx, + rustsecp256k1_v0_10_0_pubkey *pubkey, + const rustsecp256k1_v0_10_0_ecdsa_recoverable_signature *sig, const unsigned char *msghash32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_schnorrsig.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_schnorrsig.h index d0944c859..6912558ea 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_schnorrsig.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_schnorrsig.h @@ -15,7 +15,7 @@ extern "C" { /** A pointer to a function to deterministically generate a nonce. * - * Same as rustsecp256k1_v0_9_2_nonce function with the exception of accepting an + * Same as rustsecp256k1_v0_10_0_nonce function with the exception of accepting an * additional pubkey argument and not requiring an attempt argument. The pubkey * argument can protect signature schemes with key-prefixed challenge hash * inputs against reusing the nonce when signing with the wrong precomputed @@ -38,7 +38,7 @@ extern "C" { * Except for test cases, this function should compute some cryptographic hash of * the message, the key, the pubkey, the algorithm description, and data. */ -typedef int (*rustsecp256k1_v0_9_2_nonce_function_hardened)( +typedef int (*rustsecp256k1_v0_10_0_nonce_function_hardened)( unsigned char *nonce32, const unsigned char *msg, size_t msglen, @@ -61,7 +61,7 @@ typedef int (*rustsecp256k1_v0_9_2_nonce_function_hardened)( * Therefore, to create BIP-340 compliant signatures, algo must be set to * "BIP0340/nonce" and algolen to 13. */ -SECP256K1_API const rustsecp256k1_v0_9_2_nonce_function_hardened rustsecp256k1_v0_9_2_nonce_function_bip340; +SECP256K1_API const rustsecp256k1_v0_10_0_nonce_function_hardened rustsecp256k1_v0_10_0_nonce_function_bip340; /** Data structure that contains additional arguments for schnorrsig_sign_custom. * @@ -73,17 +73,17 @@ SECP256K1_API const rustsecp256k1_v0_9_2_nonce_function_hardened rustsecp256k1_v * and has no other function than making sure the object is * initialized. * noncefp: pointer to a nonce generation function. If NULL, - * rustsecp256k1_v0_9_2_nonce_function_bip340 is used + * rustsecp256k1_v0_10_0_nonce_function_bip340 is used * ndata: pointer to arbitrary data used by the nonce generation function * (can be NULL). 
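A hypothetical round trip through the recovery declarations above (the name `recover_demo` is illustrative; a non-static context and a valid secret key are assumed):

```
/* Hypothetical sketch, not part of the vendored sources. */
int recover_demo(const rustsecp256k1_v0_10_0_context *ctx,
                 const unsigned char msghash32[32],
                 const unsigned char seckey[32]) {
    rustsecp256k1_v0_10_0_ecdsa_recoverable_signature sig;
    rustsecp256k1_v0_10_0_pubkey recovered;
    unsigned char sig64[64];
    int recid;
    if (!rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(ctx, &sig, msghash32, seckey, NULL, NULL)) return 0;
    if (!rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig64, &recid, &sig)) return 0;
    /* recovered should equal the signer's public key. */
    return rustsecp256k1_v0_10_0_ecdsa_recover(ctx, &recovered, &sig, msghash32);
}
```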
If it is non-NULL and - * rustsecp256k1_v0_9_2_nonce_function_bip340 is used, then ndata must be a + * rustsecp256k1_v0_10_0_nonce_function_bip340 is used, then ndata must be a * pointer to 32-byte auxiliary randomness as per BIP-340. */ typedef struct { unsigned char magic[4]; - rustsecp256k1_v0_9_2_nonce_function_hardened noncefp; + rustsecp256k1_v0_10_0_nonce_function_hardened noncefp; void *ndata; -} rustsecp256k1_v0_9_2_schnorrsig_extraparams; +} rustsecp256k1_v0_10_0_schnorrsig_extraparams; #define SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC { 0xda, 0x6f, 0xb3, 0x8c } #define SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT {\ @@ -95,18 +95,18 @@ typedef struct { /** Create a Schnorr signature. * * Does _not_ strictly follow BIP-340 because it does not verify the resulting - * signature. Instead, you can manually use rustsecp256k1_v0_9_2_schnorrsig_verify and + * signature. Instead, you can manually use rustsecp256k1_v0_10_0_schnorrsig_verify and * abort if it fails. * * This function only signs 32-byte messages. If you have messages of a * different size (or the same size but without a context-specific tag * prefix), it is recommended to create a 32-byte message hash with - * rustsecp256k1_v0_9_2_tagged_sha256 and then sign the hash. Tagged hashing allows + * rustsecp256k1_v0_10_0_tagged_sha256 and then sign the hash. Tagged hashing allows * providing an context-specific tag for domain separation. This prevents * signatures from being valid in multiple contexts by accident. * * Returns 1 on success, 0 on failure. - * Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_2_context_static). + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_10_0_context_static). * Out: sig64: pointer to a 64-byte array to store the serialized signature. * In: msg32: the 32-byte message being signed. * keypair: pointer to an initialized keypair. @@ -116,53 +116,53 @@ typedef struct { * BIP-340 "Default Signing" for a full explanation of this * argument and for guidance if randomness is expensive. */ -SECP256K1_API int rustsecp256k1_v0_9_2_schnorrsig_sign32( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API int rustsecp256k1_v0_10_0_schnorrsig_sign32( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *sig64, const unsigned char *msg32, - const rustsecp256k1_v0_9_2_keypair *keypair, + const rustsecp256k1_v0_10_0_keypair *keypair, const unsigned char *aux_rand32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); -/** Same as rustsecp256k1_v0_9_2_schnorrsig_sign32, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_10_0_schnorrsig_sign32, but DEPRECATED. Will be removed in * future versions. */ -SECP256K1_API int rustsecp256k1_v0_9_2_schnorrsig_sign( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API int rustsecp256k1_v0_10_0_schnorrsig_sign( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *sig64, const unsigned char *msg32, - const rustsecp256k1_v0_9_2_keypair *keypair, + const rustsecp256k1_v0_10_0_keypair *keypair, const unsigned char *aux_rand32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) - SECP256K1_DEPRECATED("Use rustsecp256k1_v0_9_2_schnorrsig_sign32 instead"); + SECP256K1_DEPRECATED("Use rustsecp256k1_v0_10_0_schnorrsig_sign32 instead"); /** Create a Schnorr signature with a more flexible API. 
* - * Same arguments as rustsecp256k1_v0_9_2_schnorrsig_sign except that it allows signing + * Same arguments as rustsecp256k1_v0_10_0_schnorrsig_sign except that it allows signing * variable length messages and accepts a pointer to an extraparams object that * allows customizing signing by passing additional arguments. * - * Equivalent to rustsecp256k1_v0_9_2_schnorrsig_sign32(..., aux_rand32) if msglen is 32 + * Equivalent to rustsecp256k1_v0_10_0_schnorrsig_sign32(..., aux_rand32) if msglen is 32 * and extraparams is initialized as follows: * ``` - * rustsecp256k1_v0_9_2_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; + * rustsecp256k1_v0_10_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; * extraparams.ndata = (unsigned char*)aux_rand32; * ``` * * Returns 1 on success, 0 on failure. - * Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_2_context_static). + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_10_0_context_static). * Out: sig64: pointer to a 64-byte array to store the serialized signature. * In: msg: the message being signed. Can only be NULL if msglen is 0. * msglen: length of the message. * keypair: pointer to an initialized keypair. * extraparams: pointer to an extraparams object (can be NULL). */ -SECP256K1_API int rustsecp256k1_v0_9_2_schnorrsig_sign_custom( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API int rustsecp256k1_v0_10_0_schnorrsig_sign_custom( + const rustsecp256k1_v0_10_0_context *ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, - const rustsecp256k1_v0_9_2_keypair *keypair, - rustsecp256k1_v0_9_2_schnorrsig_extraparams *extraparams + const rustsecp256k1_v0_10_0_keypair *keypair, + rustsecp256k1_v0_10_0_schnorrsig_extraparams *extraparams ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(5); /** Verify a Schnorr signature. 
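Following the extraparams pattern quoted in the sign_custom documentation above, a hypothetical sign-and-verify sketch (the name `schnorr_demo` is illustrative; a non-static context, an initialized keypair, and 32 bytes of auxiliary randomness are assumed):

```
/* Hypothetical sketch, not part of the vendored sources. */
int schnorr_demo(const rustsecp256k1_v0_10_0_context *ctx,
                 const rustsecp256k1_v0_10_0_keypair *keypair,
                 const unsigned char *msg, size_t msglen,
                 unsigned char aux_rand32[32]) {
    rustsecp256k1_v0_10_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT;
    rustsecp256k1_v0_10_0_xonly_pubkey pubkey;
    unsigned char sig64[64];
    /* With the default BIP-340 nonce function, ndata carries the aux randomness. */
    extraparams.ndata = aux_rand32;
    if (!rustsecp256k1_v0_10_0_schnorrsig_sign_custom(ctx, sig64, msg, msglen, keypair, &extraparams)) return 0;
    if (!rustsecp256k1_v0_10_0_keypair_xonly_pub(ctx, &pubkey, NULL, keypair)) return 0;
    return rustsecp256k1_v0_10_0_schnorrsig_verify(ctx, sig64, msg, msglen, &pubkey);
}
```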
@@ -175,12 +175,12 @@ SECP256K1_API int rustsecp256k1_v0_9_2_schnorrsig_sign_custom( * msglen: length of the message * pubkey: pointer to an x-only public key to verify with (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_2_schnorrsig_verify( - const rustsecp256k1_v0_9_2_context *ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_10_0_schnorrsig_verify( + const rustsecp256k1_v0_10_0_context *ctx, const unsigned char *sig64, const unsigned char *msg, size_t msglen, - const rustsecp256k1_v0_9_2_xonly_pubkey *pubkey + const rustsecp256k1_v0_10_0_xonly_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(5); #ifdef __cplusplus diff --git a/secp256k1-sys/depend/secp256k1/sage/gen_exhaustive_groups.sage b/secp256k1-sys/depend/secp256k1/sage/gen_exhaustive_groups.sage index ad6266b20..1fc57f4b0 100644 --- a/secp256k1-sys/depend/secp256k1/sage/gen_exhaustive_groups.sage +++ b/secp256k1-sys/depend/secp256k1/sage/gen_exhaustive_groups.sage @@ -1,4 +1,4 @@ -load("rustsecp256k1_v0_9_2_params.sage") +load("rustsecp256k1_v0_10_0_params.sage") MAX_ORDER = 1000 @@ -124,7 +124,7 @@ for f in sorted(solutions.keys()): print(f"# {'if' if first else 'elif'} EXHAUSTIVE_TEST_ORDER == {f}") first = False print() - print(f"static const rustsecp256k1_v0_9_2_ge rustsecp256k1_v0_9_2_ge_const_g = SECP256K1_G_ORDER_{f};") + print(f"static const rustsecp256k1_v0_10_0_ge rustsecp256k1_v0_10_0_ge_const_g = SECP256K1_G_ORDER_{f};") output_b(b) print() print("# else") @@ -132,7 +132,7 @@ print("# error No known generator for the specified exhaustive test group ord print("# endif") print("#else") print() -print("static const rustsecp256k1_v0_9_2_ge rustsecp256k1_v0_9_2_ge_const_g = SECP256K1_G;") +print("static const rustsecp256k1_v0_10_0_ge rustsecp256k1_v0_10_0_ge_const_g = SECP256K1_G;") output_b(7) print() print("#endif") diff --git a/secp256k1-sys/depend/secp256k1/sage/gen_split_lambda_constants.sage b/secp256k1-sys/depend/secp256k1/sage/gen_split_lambda_constants.sage index 9ce49aa39..5fa589efe 100644 --- a/secp256k1-sys/depend/secp256k1/sage/gen_split_lambda_constants.sage +++ b/secp256k1-sys/depend/secp256k1/sage/gen_split_lambda_constants.sage @@ -1,9 +1,9 @@ -""" Generates the constants used in rustsecp256k1_v0_9_2_scalar_split_lambda. +""" Generates the constants used in rustsecp256k1_v0_10_0_scalar_split_lambda. -See the comments for rustsecp256k1_v0_9_2_scalar_split_lambda in src/scalar_impl.h for detailed explanations. +See the comments for rustsecp256k1_v0_10_0_scalar_split_lambda in src/scalar_impl.h for detailed explanations. """ -load("rustsecp256k1_v0_9_2_params.sage") +load("rustsecp256k1_v0_10_0_params.sage") def inf_norm(v): """Returns the infinity norm of a vector.""" @@ -24,17 +24,17 @@ def gauss_reduction(i1, i2): v2[1] -= m*v1[1] def find_split_constants_gauss(): - """Find constants for rustsecp256k1_v0_9_2_scalar_split_lamdba using gauss reduction.""" + """Find constants for rustsecp256k1_v0_10_0_scalar_split_lamdba using gauss reduction.""" (v11, v12), (v21, v22) = gauss_reduction([0, N], [1, int(LAMBDA)]) - # We use related vectors in rustsecp256k1_v0_9_2_scalar_split_lambda. + # We use related vectors in rustsecp256k1_v0_10_0_scalar_split_lambda. A1, B1 = -v21, -v11 A2, B2 = v22, -v21 return A1, B1, A2, B2 def find_split_constants_explicit_tof(): - """Find constants for rustsecp256k1_v0_9_2_scalar_split_lamdba using the trace of Frobenius. 
+ """Find constants for rustsecp256k1_v0_10_0_scalar_split_lamdba using the trace of Frobenius. See Benjamin Smith: "Easy scalar decompositions for efficient scalar multiplication on elliptic curves and genus 2 Jacobians" (https://eprint.iacr.org/2013/672), Example 2 @@ -51,7 +51,7 @@ def find_split_constants_explicit_tof(): A2 = Integer((t + c)/2 - 1) B2 = Integer(1 - (t - c)/2) - # We use a negated b values in rustsecp256k1_v0_9_2_scalar_split_lambda. + # We use a negated b values in rustsecp256k1_v0_10_0_scalar_split_lambda. B1, B2 = -B1, -B2 return A1, B1, A2, B2 @@ -90,7 +90,7 @@ def rnddiv2(v): return v >> 1 def scalar_lambda_split(k): - """Equivalent to rustsecp256k1_v0_9_2_scalar_lambda_split().""" + """Equivalent to rustsecp256k1_v0_10_0_scalar_lambda_split().""" c1 = rnddiv2((k * G1) >> 383) c2 = rnddiv2((k * G2) >> 383) c1 = (c1 * -B1) % N diff --git a/secp256k1-sys/depend/secp256k1/sage/prove_group_implementations.sage b/secp256k1-sys/depend/secp256k1/sage/prove_group_implementations.sage index f9d164833..015905215 100644 --- a/secp256k1-sys/depend/secp256k1/sage/prove_group_implementations.sage +++ b/secp256k1-sys/depend/secp256k1/sage/prove_group_implementations.sage @@ -5,8 +5,8 @@ import sys load("group_prover.sage") load("weierstrass_prover.sage") -def formula_rustsecp256k1_v0_9_2_gej_double_var(a): - """libsecp256k1's rustsecp256k1_v0_9_2_gej_double_var, used by various addition functions""" +def formula_rustsecp256k1_v0_10_0_gej_double_var(a): + """libsecp256k1's rustsecp256k1_v0_10_0_gej_double_var, used by various addition functions""" rz = a.Z * a.Y s = a.Y^2 l = a.X^2 @@ -24,8 +24,8 @@ def formula_rustsecp256k1_v0_9_2_gej_double_var(a): ry = -ry return jacobianpoint(rx, ry, rz) -def formula_rustsecp256k1_v0_9_2_gej_add_var(branch, a, b): - """libsecp256k1's rustsecp256k1_v0_9_2_gej_add_var""" +def formula_rustsecp256k1_v0_10_0_gej_add_var(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_10_0_gej_add_var""" if branch == 0: return (constraints(), constraints(nonzero={a.Infinity : 'a_infinite'}), b) if branch == 1: @@ -43,7 +43,7 @@ def formula_rustsecp256k1_v0_9_2_gej_add_var(branch, a, b): i = -s2 i = i + s1 if branch == 2: - r = formula_rustsecp256k1_v0_9_2_gej_double_var(a) + r = formula_rustsecp256k1_v0_10_0_gej_double_var(a) return (constraints(), constraints(zero={h : 'h=0', i : 'i=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}), r) if branch == 3: return (constraints(), constraints(zero={h : 'h=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={i : 'i!=0'}), point_at_infinity()) @@ -63,8 +63,8 @@ def formula_rustsecp256k1_v0_9_2_gej_add_var(branch, a, b): ry = ry + h3 return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) -def formula_rustsecp256k1_v0_9_2_gej_add_ge_var(branch, a, b): - """libsecp256k1's rustsecp256k1_v0_9_2_gej_add_ge_var, which assume bz==1""" +def formula_rustsecp256k1_v0_10_0_gej_add_ge_var(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_10_0_gej_add_ge_var, which assume bz==1""" if branch == 0: return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(nonzero={a.Infinity : 'a_infinite'}), b) if branch == 1: @@ -80,7 +80,7 @@ def formula_rustsecp256k1_v0_9_2_gej_add_ge_var(branch, a, b): i = -s2 i = i + s1 if (branch == 2): - r = formula_rustsecp256k1_v0_9_2_gej_double_var(a) + r = formula_rustsecp256k1_v0_10_0_gej_double_var(a) return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity 
: 'b_finite', h : 'h=0', i : 'i=0'}), r) if (branch == 3): return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity()) @@ -99,8 +99,8 @@ def formula_rustsecp256k1_v0_9_2_gej_add_ge_var(branch, a, b): ry = ry + h3 return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) -def formula_rustsecp256k1_v0_9_2_gej_add_zinv_var(branch, a, b): - """libsecp256k1's rustsecp256k1_v0_9_2_gej_add_zinv_var""" +def formula_rustsecp256k1_v0_10_0_gej_add_zinv_var(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_10_0_gej_add_zinv_var""" bzinv = b.Z^(-1) if branch == 0: rinf = b.Infinity @@ -124,7 +124,7 @@ def formula_rustsecp256k1_v0_9_2_gej_add_zinv_var(branch, a, b): i = -s2 i = i + s1 if branch == 2: - r = formula_rustsecp256k1_v0_9_2_gej_double_var(a) + r = formula_rustsecp256k1_v0_10_0_gej_double_var(a) return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r) if branch == 3: return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity()) @@ -143,8 +143,8 @@ def formula_rustsecp256k1_v0_9_2_gej_add_zinv_var(branch, a, b): ry = ry + h3 return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) -def formula_rustsecp256k1_v0_9_2_gej_add_ge(branch, a, b): - """libsecp256k1's rustsecp256k1_v0_9_2_gej_add_ge""" +def formula_rustsecp256k1_v0_10_0_gej_add_ge(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_10_0_gej_add_ge""" zeroes = {} nonzeroes = {} a_infinity = False @@ -205,8 +205,8 @@ def formula_rustsecp256k1_v0_9_2_gej_add_ge(branch, a, b): nonzeroes.update({rz : 'r.z != 0'}) return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), jacobianpoint(rx, ry, rz)) -def formula_rustsecp256k1_v0_9_2_gej_add_ge_old(branch, a, b): - """libsecp256k1's old rustsecp256k1_v0_9_2_gej_add_ge, which fails when ay+by=0 but ax!=bx""" +def formula_rustsecp256k1_v0_10_0_gej_add_ge_old(branch, a, b): + """libsecp256k1's old rustsecp256k1_v0_10_0_gej_add_ge, which fails when ay+by=0 but ax!=bx""" a_infinity = (branch & 1) != 0 zero = {} nonzero = {} @@ -269,17 +269,17 @@ def formula_rustsecp256k1_v0_9_2_gej_add_ge_old(branch, a, b): if __name__ == "__main__": success = True - success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_9_2_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_9_2_gej_add_var) - success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_9_2_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_9_2_gej_add_ge_var) - success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_9_2_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_9_2_gej_add_zinv_var) - success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_9_2_gej_add_ge", 0, 7, 8, formula_rustsecp256k1_v0_9_2_gej_add_ge) - success = success & (not check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_9_2_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_9_2_gej_add_ge_old)) + success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_10_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_10_0_gej_add_var) + success = success & 
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_10_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_10_0_gej_add_ge_var) + success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_10_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_10_0_gej_add_zinv_var) + success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_10_0_gej_add_ge", 0, 7, 8, formula_rustsecp256k1_v0_10_0_gej_add_ge) + success = success & (not check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_10_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_10_0_gej_add_ge_old)) if len(sys.argv) >= 2 and sys.argv[1] == "--exhaustive": - success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_9_2_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_9_2_gej_add_var, 43) - success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_9_2_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_9_2_gej_add_ge_var, 43) - success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_9_2_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_9_2_gej_add_zinv_var, 43) - success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_9_2_gej_add_ge", 0, 7, 8, formula_rustsecp256k1_v0_9_2_gej_add_ge, 43) - success = success & (not check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_9_2_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_9_2_gej_add_ge_old, 43)) + success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_10_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_10_0_gej_add_var, 43) + success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_10_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_10_0_gej_add_ge_var, 43) + success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_10_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_10_0_gej_add_zinv_var, 43) + success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_10_0_gej_add_ge", 0, 7, 8, formula_rustsecp256k1_v0_10_0_gej_add_ge, 43) + success = success & (not check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_10_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_10_0_gej_add_ge_old, 43)) sys.exit(int(not success)) diff --git a/secp256k1-sys/depend/secp256k1/src/CMakeLists.txt b/secp256k1-sys/depend/secp256k1/src/CMakeLists.txt index 15552f10f..caac60526 100644 --- a/secp256k1-sys/depend/secp256k1/src/CMakeLists.txt +++ b/secp256k1-sys/depend/secp256k1/src/CMakeLists.txt @@ -1,23 +1,23 @@ # Must be included before CMAKE_INSTALL_INCLUDEDIR is used. include(GNUInstallDirs) -add_library(rustsecp256k1_v0_9_2_precomputed OBJECT EXCLUDE_FROM_ALL +add_library(rustsecp256k1_v0_10_0_precomputed OBJECT EXCLUDE_FROM_ALL precomputed_ecmult.c precomputed_ecmult_gen.c ) # Add objects explicitly rather than linking to the object libs to keep them # from being exported. 
-add_library(secp256k1 secp256k1.c $<TARGET_OBJECTS:rustsecp256k1_v0_9_2_precomputed>) +add_library(secp256k1 secp256k1.c $<TARGET_OBJECTS:rustsecp256k1_v0_10_0_precomputed>) -add_library(rustsecp256k1_v0_9_2_asm INTERFACE) +add_library(rustsecp256k1_v0_10_0_asm INTERFACE) if(SECP256K1_ASM STREQUAL "arm32") - add_library(rustsecp256k1_v0_9_2_asm_arm OBJECT EXCLUDE_FROM_ALL) - target_sources(rustsecp256k1_v0_9_2_asm_arm PUBLIC + add_library(rustsecp256k1_v0_10_0_asm_arm OBJECT EXCLUDE_FROM_ALL) + target_sources(rustsecp256k1_v0_10_0_asm_arm PUBLIC asm/field_10x26_arm.s ) - target_sources(secp256k1 PRIVATE $<TARGET_OBJECTS:rustsecp256k1_v0_9_2_asm_arm>) - target_link_libraries(rustsecp256k1_v0_9_2_asm INTERFACE rustsecp256k1_v0_9_2_asm_arm) + target_sources(secp256k1 PRIVATE $<TARGET_OBJECTS:rustsecp256k1_v0_10_0_asm_arm>) + target_link_libraries(rustsecp256k1_v0_10_0_asm INTERFACE rustsecp256k1_v0_10_0_asm_arm) endif() if(WIN32) @@ -29,7 +29,7 @@ endif() # Object libs don't know if they're being built for a shared or static lib. # Grab the PIC property from secp256k1 which knows. get_target_property(use_pic secp256k1 POSITION_INDEPENDENT_CODE) -set_target_properties(rustsecp256k1_v0_9_2_precomputed PROPERTIES POSITION_INDEPENDENT_CODE ${use_pic}) +set_target_properties(rustsecp256k1_v0_10_0_precomputed PROPERTIES POSITION_INDEPENDENT_CODE ${use_pic}) target_include_directories(secp256k1 INTERFACE # Add the include path for parent projects so that they don't have to manually add it. @@ -79,27 +79,27 @@ if(SECP256K1_BUILD_BENCHMARK) add_executable(bench bench.c) target_link_libraries(bench secp256k1) add_executable(bench_internal bench_internal.c) - target_link_libraries(bench_internal rustsecp256k1_v0_9_2_precomputed rustsecp256k1_v0_9_2_asm) + target_link_libraries(bench_internal rustsecp256k1_v0_10_0_precomputed rustsecp256k1_v0_10_0_asm) add_executable(bench_ecmult bench_ecmult.c) - target_link_libraries(bench_ecmult rustsecp256k1_v0_9_2_precomputed rustsecp256k1_v0_9_2_asm) + target_link_libraries(bench_ecmult rustsecp256k1_v0_10_0_precomputed rustsecp256k1_v0_10_0_asm) endif() if(SECP256K1_BUILD_TESTS) add_executable(noverify_tests tests.c) - target_link_libraries(noverify_tests rustsecp256k1_v0_9_2_precomputed rustsecp256k1_v0_9_2_asm) + target_link_libraries(noverify_tests rustsecp256k1_v0_10_0_precomputed rustsecp256k1_v0_10_0_asm) add_test(NAME noverify_tests COMMAND noverify_tests) if(NOT CMAKE_BUILD_TYPE STREQUAL "Coverage") add_executable(tests tests.c) target_compile_definitions(tests PRIVATE VERIFY) - target_link_libraries(tests rustsecp256k1_v0_9_2_precomputed rustsecp256k1_v0_9_2_asm) + target_link_libraries(tests rustsecp256k1_v0_10_0_precomputed rustsecp256k1_v0_10_0_asm) add_test(NAME tests COMMAND tests) endif() endif() if(SECP256K1_BUILD_EXHAUSTIVE_TESTS) - # Note: do not include rustsecp256k1_v0_9_2_precomputed in exhaustive_tests (it uses runtime-generated tables). + # Note: do not include rustsecp256k1_v0_10_0_precomputed in exhaustive_tests (it uses runtime-generated tables).
add_executable(exhaustive_tests tests_exhaustive.c) - target_link_libraries(exhaustive_tests rustsecp256k1_v0_9_2_asm) + target_link_libraries(exhaustive_tests rustsecp256k1_v0_10_0_asm) target_compile_definitions(exhaustive_tests PRIVATE $<$<NOT:$<CONFIG:Coverage>>:VERIFY>) add_test(NAME exhaustive_tests COMMAND exhaustive_tests) endif() @@ -118,22 +118,22 @@ if(SECP256K1_INSTALL) ) set(${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/secp256k1.h" - "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_9_2_preallocated.h" + "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_10_0_preallocated.h" ) if(SECP256K1_ENABLE_MODULE_ECDH) - list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_9_2_ecdh.h") + list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_10_0_ecdh.h") endif() if(SECP256K1_ENABLE_MODULE_RECOVERY) - list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_9_2_recovery.h") + list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_10_0_recovery.h") endif() if(SECP256K1_ENABLE_MODULE_EXTRAKEYS) - list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_9_2_extrakeys.h") + list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_10_0_extrakeys.h") endif() if(SECP256K1_ENABLE_MODULE_SCHNORRSIG) - list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_9_2_schnorrsig.h") + list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_10_0_schnorrsig.h") endif() if(SECP256K1_ENABLE_MODULE_ELLSWIFT) - list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_9_2_ellswift.h") + list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_10_0_ellswift.h") endif() install(FILES ${${PROJECT_NAME}_headers} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} @@ -161,5 +161,13 @@ if(SECP256K1_INSTALL) ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} -) + ) + + include(GeneratePkgConfigFile) + generate_pkg_config_file(${PROJECT_SOURCE_DIR}/libsecp256k1.pc.in) + install( + FILES + ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc + DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig + ) endif() diff --git a/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s b/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s index 5ec00ad3d..7b90c1a8b 100644 --- a/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s +++ b/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s @@ -27,9 +27,9 @@ Note: .set field_not_M, 0xfc000000 @ ~M = ~0x3ffffff .align 2 - .global rustsecp256k1_v0_9_2_fe_mul_inner - .type rustsecp256k1_v0_9_2_fe_mul_inner, %function - .hidden rustsecp256k1_v0_9_2_fe_mul_inner + .global rustsecp256k1_v0_10_0_fe_mul_inner + .type rustsecp256k1_v0_10_0_fe_mul_inner, %function + .hidden rustsecp256k1_v0_10_0_fe_mul_inner @ Arguments: @ r0 r Restrict: can overlap with a, not with b @ r1 a @ r2 b @ Stack (total 4+10*4 = 44) @ sp + #0 saved 'r' pointer @ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9 -rustsecp256k1_v0_9_2_fe_mul_inner: +rustsecp256k1_v0_10_0_fe_mul_inner: stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14} sub sp, sp, #48 @ frame=44 + alignment str r0, [sp, #0] @ save result address, we need it only at the end @@ -512,19 +512,19 @@ rustsecp256k1_v0_9_2_fe_mul_inner: add sp, sp, #48 ldmfd sp!, {r4, r5, r6, 
r7, r8, r9, r10, r11, pc} - .size rustsecp256k1_v0_9_2_fe_mul_inner, .-rustsecp256k1_v0_9_2_fe_mul_inner + .size rustsecp256k1_v0_10_0_fe_mul_inner, .-rustsecp256k1_v0_10_0_fe_mul_inner .align 2 - .global rustsecp256k1_v0_9_2_fe_sqr_inner - .type rustsecp256k1_v0_9_2_fe_sqr_inner, %function - .hidden rustsecp256k1_v0_9_2_fe_sqr_inner + .global rustsecp256k1_v0_10_0_fe_sqr_inner + .type rustsecp256k1_v0_10_0_fe_sqr_inner, %function + .hidden rustsecp256k1_v0_10_0_fe_sqr_inner @ Arguments: @ r0 r Can overlap with a @ r1 a @ Stack (total 4+10*4 = 44) @ sp + #0 saved 'r' pointer @ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9 -rustsecp256k1_v0_9_2_fe_sqr_inner: +rustsecp256k1_v0_10_0_fe_sqr_inner: stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14} sub sp, sp, #48 @ frame=44 + alignment str r0, [sp, #0] @ save result address, we need it only at the end @@ -911,5 +911,6 @@ rustsecp256k1_v0_9_2_fe_sqr_inner: add sp, sp, #48 ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc} - .size rustsecp256k1_v0_9_2_fe_sqr_inner, .-rustsecp256k1_v0_9_2_fe_sqr_inner + .size rustsecp256k1_v0_10_0_fe_sqr_inner, .-rustsecp256k1_v0_10_0_fe_sqr_inner + .section .note.GNU-stack,"",%progbits diff --git a/secp256k1-sys/depend/secp256k1/src/assumptions.h b/secp256k1-sys/depend/secp256k1/src/assumptions.h index c805181db..d7d69404a 100644 --- a/secp256k1-sys/depend/secp256k1/src/assumptions.h +++ b/secp256k1-sys/depend/secp256k1/src/assumptions.h @@ -19,7 +19,7 @@ reduce the odds of experiencing an unwelcome surprise. */ -struct rustsecp256k1_v0_9_2_assumption_checker { +struct rustsecp256k1_v0_10_0_assumption_checker { /* This uses a trick to implement a static assertion in C89: a type with an array of negative size is not allowed. */ int dummy_array[( diff --git a/secp256k1-sys/depend/secp256k1/src/bench.c b/secp256k1-sys/depend/secp256k1/src/bench.c index f8661bf11..902d7facc 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench.c +++ b/secp256k1-sys/depend/secp256k1/src/bench.c @@ -67,7 +67,7 @@ static void help(int default_iters) { } typedef struct { - rustsecp256k1_v0_9_2_context *ctx; + rustsecp256k1_v0_10_0_context *ctx; unsigned char msg[32]; unsigned char key[32]; unsigned char sig[72]; @@ -81,14 +81,14 @@ static void bench_verify(void* arg, int iters) { bench_data* data = (bench_data*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_pubkey pubkey; - rustsecp256k1_v0_9_2_ecdsa_signature sig; + rustsecp256k1_v0_10_0_pubkey pubkey; + rustsecp256k1_v0_10_0_ecdsa_signature sig; data->sig[data->siglen - 1] ^= (i & 0xFF); data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0)); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0)); data->sig[data->siglen - 1] ^= (i & 0xFF); data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); @@ -115,9 +115,9 @@ static void bench_sign_run(void* arg, int iters) { for (i = 0; i < iters; i++) { size_t siglen = 74; int j; -
rustsecp256k1_v0_9_2_ecdsa_signature signature; - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL)); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature)); + rustsecp256k1_v0_10_0_ecdsa_signature signature; + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature)); for (j = 0; j < 32; j++) { data->msg[j] = sig[j]; data->key[j] = sig[j + 32]; @@ -141,9 +141,9 @@ static void bench_keygen_run(void *arg, int iters) { for (i = 0; i < iters; i++) { unsigned char pub33[33]; size_t len = 33; - rustsecp256k1_v0_9_2_pubkey pubkey; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(data->ctx, &pubkey, data->key)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(data->ctx, pub33, &len, &pubkey, SECP256K1_EC_COMPRESSED)); + rustsecp256k1_v0_10_0_pubkey pubkey; + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(data->ctx, &pubkey, data->key)); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(data->ctx, pub33, &len, &pubkey, SECP256K1_EC_COMPRESSED)); memcpy(data->key, pub33 + 1, 32); } } @@ -167,8 +167,8 @@ static void bench_keygen_run(void *arg, int iters) { int main(int argc, char** argv) { int i; - rustsecp256k1_v0_9_2_pubkey pubkey; - rustsecp256k1_v0_9_2_ecdsa_signature sig; + rustsecp256k1_v0_10_0_pubkey pubkey; + rustsecp256k1_v0_10_0_ecdsa_signature sig; bench_data data; int d = argc == 1; @@ -232,7 +232,7 @@ int main(int argc, char** argv) { #endif /* ECDSA benchmark */ - data.ctx = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE); + data.ctx = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE); for (i = 0; i < 32; i++) { data.msg[i] = 1 + i; @@ -241,11 +241,11 @@ int main(int argc, char** argv) { data.key[i] = 33 + i; } data.siglen = 72; - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL)); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(data.ctx, &pubkey, data.key)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig)); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(data.ctx, &pubkey, data.key)); data.pubkeylen = 33; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); print_output_table_header_row(); if (d || have_flag(argc, argv, "ecdsa") || have_flag(argc, argv, "verify") || have_flag(argc, argv, "ecdsa_verify")) run_benchmark("ecdsa_verify", bench_verify, NULL, NULL, &data, 10, iters); @@ -253,7 +253,7 @@ int main(int argc, char** argv) { if (d || have_flag(argc, argv, "ecdsa") || have_flag(argc, argv, "sign") || have_flag(argc, argv, "ecdsa_sign")) run_benchmark("ecdsa_sign", bench_sign_run, bench_sign_setup, NULL, &data, 10, iters); if (d || have_flag(argc, argv, "ec") || have_flag(argc, argv, "keygen") || have_flag(argc, argv, "ec_keygen")) run_benchmark("ec_keygen", bench_keygen_run, bench_keygen_setup, NULL, &data, 10, iters); - rustsecp256k1_v0_9_2_context_destroy(data.ctx); + rustsecp256k1_v0_10_0_context_destroy(data.ctx); #ifdef ENABLE_MODULE_ECDH /* ECDH benchmarks */ 
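The bench.c hunks above are typical of the entire patch: every exported identifier moves mechanically from the rustsecp256k1_v0_9_2_ prefix to rustsecp256k1_v0_10_0_, with no change to signatures or behaviour. For illustration only (this sketch is not part of the patch), a minimal ECDSA round trip through the renamed entry points, using the same calls and the same arbitrary msg/key bytes as bench.c; it assumes the vendored header is reachable as <secp256k1.h>:

#include <secp256k1.h>
#include <stdio.h>

int main(void) {
    rustsecp256k1_v0_10_0_context *ctx;
    rustsecp256k1_v0_10_0_pubkey pubkey;
    rustsecp256k1_v0_10_0_ecdsa_signature sig;
    unsigned char msg[32], key[32], der[72];
    size_t derlen = sizeof(der);
    int i;

    for (i = 0; i < 32; i++) {
        msg[i] = 1 + i;  /* stand-in for a 32-byte message hash, as in bench.c */
        key[i] = 33 + i; /* arbitrary valid secret key, as in bench.c */
    }

    ctx = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE);

    /* Sign, serialize to DER, parse back, verify: the same sequence the
     * bench_sign_run/bench_verify hunks above exercise. */
    if (!rustsecp256k1_v0_10_0_ecdsa_sign(ctx, &sig, msg, key, NULL, NULL)) return 1;
    if (!rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der(ctx, der, &derlen, &sig)) return 1;
    if (!rustsecp256k1_v0_10_0_ec_pubkey_create(ctx, &pubkey, key)) return 1;
    if (!rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(ctx, &sig, der, derlen)) return 1;
    printf("verify: %d\n", rustsecp256k1_v0_10_0_ecdsa_verify(ctx, &sig, msg, &pubkey));

    rustsecp256k1_v0_10_0_context_destroy(ctx);
    return 0;
}

The per-version symbol prefix exists so that two versions of secp256k1-sys can be linked into one binary without collisions, which is why a vendor bump touches every file yet changes almost nothing but names.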
diff --git a/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c b/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c index 9683468e6..676c1f0ef 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c @@ -35,14 +35,14 @@ static void help(char **argv) { typedef struct { /* Setup once in advance */ - rustsecp256k1_v0_9_2_context* ctx; - rustsecp256k1_v0_9_2_scratch_space* scratch; - rustsecp256k1_v0_9_2_scalar* scalars; - rustsecp256k1_v0_9_2_ge* pubkeys; - rustsecp256k1_v0_9_2_gej* pubkeys_gej; - rustsecp256k1_v0_9_2_scalar* seckeys; - rustsecp256k1_v0_9_2_gej* expected_output; - rustsecp256k1_v0_9_2_ecmult_multi_func ecmult_multi; + rustsecp256k1_v0_10_0_context* ctx; + rustsecp256k1_v0_10_0_scratch_space* scratch; + rustsecp256k1_v0_10_0_scalar* scalars; + rustsecp256k1_v0_10_0_ge* pubkeys; + rustsecp256k1_v0_10_0_gej* pubkeys_gej; + rustsecp256k1_v0_10_0_scalar* seckeys; + rustsecp256k1_v0_10_0_gej* expected_output; + rustsecp256k1_v0_10_0_ecmult_multi_func ecmult_multi; /* Changes per benchmark */ size_t count; @@ -54,7 +54,7 @@ typedef struct { size_t offset2; /* Benchmark output. */ - rustsecp256k1_v0_9_2_gej* output; + rustsecp256k1_v0_10_0_gej* output; } bench_data; /* Hashes x into [0, POINTS) twice and store the result in offset1 and offset2. */ @@ -67,24 +67,24 @@ static void hash_into_offset(bench_data* data, size_t x) { * sum(outputs) ?= (sum(scalars_gen) + sum(seckeys)*sum(scalars))*G */ static void bench_ecmult_teardown_helper(bench_data* data, size_t* seckey_offset, size_t* scalar_offset, size_t* scalar_gen_offset, int iters) { int i; - rustsecp256k1_v0_9_2_gej sum_output, tmp; - rustsecp256k1_v0_9_2_scalar sum_scalars; + rustsecp256k1_v0_10_0_gej sum_output, tmp; + rustsecp256k1_v0_10_0_scalar sum_scalars; - rustsecp256k1_v0_9_2_gej_set_infinity(&sum_output); - rustsecp256k1_v0_9_2_scalar_clear(&sum_scalars); + rustsecp256k1_v0_10_0_gej_set_infinity(&sum_output); + rustsecp256k1_v0_10_0_scalar_clear(&sum_scalars); for (i = 0; i < iters; ++i) { - rustsecp256k1_v0_9_2_gej_add_var(&sum_output, &sum_output, &data->output[i], NULL); + rustsecp256k1_v0_10_0_gej_add_var(&sum_output, &sum_output, &data->output[i], NULL); if (scalar_gen_offset != NULL) { - rustsecp256k1_v0_9_2_scalar_add(&sum_scalars, &sum_scalars, &data->scalars[(*scalar_gen_offset+i) % POINTS]); + rustsecp256k1_v0_10_0_scalar_add(&sum_scalars, &sum_scalars, &data->scalars[(*scalar_gen_offset+i) % POINTS]); } if (seckey_offset != NULL) { - rustsecp256k1_v0_9_2_scalar s = data->seckeys[(*seckey_offset+i) % POINTS]; - rustsecp256k1_v0_9_2_scalar_mul(&s, &s, &data->scalars[(*scalar_offset+i) % POINTS]); - rustsecp256k1_v0_9_2_scalar_add(&sum_scalars, &sum_scalars, &s); + rustsecp256k1_v0_10_0_scalar s = data->seckeys[(*seckey_offset+i) % POINTS]; + rustsecp256k1_v0_10_0_scalar_mul(&s, &s, &data->scalars[(*scalar_offset+i) % POINTS]); + rustsecp256k1_v0_10_0_scalar_add(&sum_scalars, &sum_scalars, &s); } } - rustsecp256k1_v0_9_2_ecmult_gen(&data->ctx->ecmult_gen_ctx, &tmp, &sum_scalars); - CHECK(rustsecp256k1_v0_9_2_gej_eq_var(&tmp, &sum_output)); + rustsecp256k1_v0_10_0_ecmult_gen(&data->ctx->ecmult_gen_ctx, &tmp, &sum_scalars); + CHECK(rustsecp256k1_v0_10_0_gej_eq_var(&tmp, &sum_output)); } static void bench_ecmult_setup(void* arg) { @@ -99,7 +99,7 @@ static void bench_ecmult_gen(void* arg, int iters) { int i; for (i = 0; i < iters; ++i) { - rustsecp256k1_v0_9_2_ecmult_gen(&data->ctx->ecmult_gen_ctx, &data->output[i], &data->scalars[(data->offset1+i) % 
POINTS]); + rustsecp256k1_v0_10_0_ecmult_gen(&data->ctx->ecmult_gen_ctx, &data->output[i], &data->scalars[(data->offset1+i) % POINTS]); } } @@ -113,7 +113,7 @@ static void bench_ecmult_const(void* arg, int iters) { int i; for (i = 0; i < iters; ++i) { - rustsecp256k1_v0_9_2_ecmult_const(&data->output[i], &data->pubkeys[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS]); + rustsecp256k1_v0_10_0_ecmult_const(&data->output[i], &data->pubkeys[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS]); } } @@ -127,7 +127,7 @@ static void bench_ecmult_1p(void* arg, int iters) { int i; for (i = 0; i < iters; ++i) { - rustsecp256k1_v0_9_2_ecmult(&data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], NULL); + rustsecp256k1_v0_10_0_ecmult(&data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], NULL); } } @@ -141,7 +141,7 @@ static void bench_ecmult_0p_g(void* arg, int iters) { int i; for (i = 0; i < iters; ++i) { - rustsecp256k1_v0_9_2_ecmult(&data->output[i], NULL, &rustsecp256k1_v0_9_2_scalar_zero, &data->scalars[(data->offset1+i) % POINTS]); + rustsecp256k1_v0_10_0_ecmult(&data->output[i], NULL, &rustsecp256k1_v0_10_0_scalar_zero, &data->scalars[(data->offset1+i) % POINTS]); } } @@ -155,7 +155,7 @@ static void bench_ecmult_1p_g(void* arg, int iters) { int i; for (i = 0; i < iters/2; ++i) { - rustsecp256k1_v0_9_2_ecmult(&data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], &data->scalars[(data->offset1+i) % POINTS]); + rustsecp256k1_v0_10_0_ecmult(&data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], &data->scalars[(data->offset1+i) % POINTS]); } } @@ -181,12 +181,12 @@ static void run_ecmult_bench(bench_data* data, int iters) { run_benchmark(str, bench_ecmult_1p_g, bench_ecmult_setup, bench_ecmult_1p_g_teardown, data, 10, 2*iters); } -static int bench_ecmult_multi_callback(rustsecp256k1_v0_9_2_scalar* sc, rustsecp256k1_v0_9_2_ge* ge, size_t idx, void* arg) { +static int bench_ecmult_multi_callback(rustsecp256k1_v0_10_0_scalar* sc, rustsecp256k1_v0_10_0_ge* ge, size_t idx, void* arg) { bench_data* data = (bench_data*)arg; if (data->includes_g) ++idx; if (idx == 0) { *sc = data->scalars[data->offset1]; - *ge = rustsecp256k1_v0_9_2_ge_const_g; + *ge = rustsecp256k1_v0_10_0_ge_const_g; } else { *sc = data->scalars[(data->offset1 + idx) % POINTS]; *ge = data->pubkeys[(data->offset2 + idx - 1) % POINTS]; @@ -220,14 +220,14 @@ static void bench_ecmult_multi_teardown(void* arg, int iters) { iters = iters / data->count; /* Verify the results in teardown, to avoid doing comparisons while benchmarking. 
*/ for (iter = 0; iter < iters; ++iter) { - rustsecp256k1_v0_9_2_gej tmp; - rustsecp256k1_v0_9_2_gej_add_var(&tmp, &data->output[iter], &data->expected_output[iter], NULL); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&tmp)); + rustsecp256k1_v0_10_0_gej tmp; + rustsecp256k1_v0_10_0_gej_add_var(&tmp, &data->output[iter], &data->expected_output[iter], NULL); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&tmp)); } } -static void generate_scalar(uint32_t num, rustsecp256k1_v0_9_2_scalar* scalar) { - rustsecp256k1_v0_9_2_sha256 sha256; +static void generate_scalar(uint32_t num, rustsecp256k1_v0_10_0_scalar* scalar) { + rustsecp256k1_v0_10_0_sha256 sha256; unsigned char c[10] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0}; unsigned char buf[32]; int overflow = 0; @@ -235,10 +235,10 @@ static void generate_scalar(uint32_t num, rustsecp256k1_v0_9_2_scalar* scalar) { c[7] = num >> 8; c[8] = num >> 16; c[9] = num >> 24; - rustsecp256k1_v0_9_2_sha256_initialize(&sha256); - rustsecp256k1_v0_9_2_sha256_write(&sha256, c, sizeof(c)); - rustsecp256k1_v0_9_2_sha256_finalize(&sha256, buf); - rustsecp256k1_v0_9_2_scalar_set_b32(scalar, buf, &overflow); + rustsecp256k1_v0_10_0_sha256_initialize(&sha256); + rustsecp256k1_v0_10_0_sha256_write(&sha256, c, sizeof(c)); + rustsecp256k1_v0_10_0_sha256_finalize(&sha256, buf); + rustsecp256k1_v0_10_0_scalar_set_b32(scalar, buf, &overflow); CHECK(!overflow); } @@ -253,15 +253,15 @@ static void run_ecmult_multi_bench(bench_data* data, size_t count, int includes_ /* Compute (the negation of) the expected results directly. */ hash_into_offset(data, data->count); for (iter = 0; iter < iters; ++iter) { - rustsecp256k1_v0_9_2_scalar tmp; - rustsecp256k1_v0_9_2_scalar total = data->scalars[(data->offset1++) % POINTS]; + rustsecp256k1_v0_10_0_scalar tmp; + rustsecp256k1_v0_10_0_scalar total = data->scalars[(data->offset1++) % POINTS]; size_t i = 0; for (i = 0; i + 1 < count; ++i) { - rustsecp256k1_v0_9_2_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % POINTS]); - rustsecp256k1_v0_9_2_scalar_add(&total, &total, &tmp); + rustsecp256k1_v0_10_0_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % POINTS]); + rustsecp256k1_v0_10_0_scalar_add(&total, &total, &tmp); } - rustsecp256k1_v0_9_2_scalar_negate(&total, &total); - rustsecp256k1_v0_9_2_ecmult(&data->expected_output[iter], NULL, &rustsecp256k1_v0_9_2_scalar_zero, &total); + rustsecp256k1_v0_10_0_scalar_negate(&total, &total); + rustsecp256k1_v0_10_0_ecmult(&data->expected_output[iter], NULL, &rustsecp256k1_v0_10_0_scalar_zero, &total); } /* Run the benchmark. 
*/ @@ -280,7 +280,7 @@ int main(int argc, char **argv) { int iters = get_iters(10000); - data.ecmult_multi = rustsecp256k1_v0_9_2_ecmult_multi_var; + data.ecmult_multi = rustsecp256k1_v0_10_0_ecmult_multi_var; if (argc > 1) { if(have_flag(argc, argv, "-h") @@ -290,10 +290,10 @@ int main(int argc, char **argv) { return 0; } else if(have_flag(argc, argv, "pippenger_wnaf")) { printf("Using pippenger_wnaf:\n"); - data.ecmult_multi = rustsecp256k1_v0_9_2_ecmult_pippenger_batch_single; + data.ecmult_multi = rustsecp256k1_v0_10_0_ecmult_pippenger_batch_single; } else if(have_flag(argc, argv, "strauss_wnaf")) { printf("Using strauss_wnaf:\n"); - data.ecmult_multi = rustsecp256k1_v0_9_2_ecmult_strauss_batch_single; + data.ecmult_multi = rustsecp256k1_v0_10_0_ecmult_strauss_batch_single; } else if(have_flag(argc, argv, "simple")) { printf("Using simple algorithm:\n"); } else { @@ -303,33 +303,33 @@ int main(int argc, char **argv) { } } - data.ctx = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE); - scratch_size = rustsecp256k1_v0_9_2_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16; + data.ctx = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE); + scratch_size = rustsecp256k1_v0_10_0_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16; if (!have_flag(argc, argv, "simple")) { - data.scratch = rustsecp256k1_v0_9_2_scratch_space_create(data.ctx, scratch_size); + data.scratch = rustsecp256k1_v0_10_0_scratch_space_create(data.ctx, scratch_size); } else { data.scratch = NULL; } /* Allocate stuff */ - data.scalars = malloc(sizeof(rustsecp256k1_v0_9_2_scalar) * POINTS); - data.seckeys = malloc(sizeof(rustsecp256k1_v0_9_2_scalar) * POINTS); - data.pubkeys = malloc(sizeof(rustsecp256k1_v0_9_2_ge) * POINTS); - data.pubkeys_gej = malloc(sizeof(rustsecp256k1_v0_9_2_gej) * POINTS); - data.expected_output = malloc(sizeof(rustsecp256k1_v0_9_2_gej) * (iters + 1)); - data.output = malloc(sizeof(rustsecp256k1_v0_9_2_gej) * (iters + 1)); + data.scalars = malloc(sizeof(rustsecp256k1_v0_10_0_scalar) * POINTS); + data.seckeys = malloc(sizeof(rustsecp256k1_v0_10_0_scalar) * POINTS); + data.pubkeys = malloc(sizeof(rustsecp256k1_v0_10_0_ge) * POINTS); + data.pubkeys_gej = malloc(sizeof(rustsecp256k1_v0_10_0_gej) * POINTS); + data.expected_output = malloc(sizeof(rustsecp256k1_v0_10_0_gej) * (iters + 1)); + data.output = malloc(sizeof(rustsecp256k1_v0_10_0_gej) * (iters + 1)); /* Generate a set of scalars, and private/public keypairs. 
*/ - rustsecp256k1_v0_9_2_gej_set_ge(&data.pubkeys_gej[0], &rustsecp256k1_v0_9_2_ge_const_g); - rustsecp256k1_v0_9_2_scalar_set_int(&data.seckeys[0], 1); + rustsecp256k1_v0_10_0_gej_set_ge(&data.pubkeys_gej[0], &rustsecp256k1_v0_10_0_ge_const_g); + rustsecp256k1_v0_10_0_scalar_set_int(&data.seckeys[0], 1); for (i = 0; i < POINTS; ++i) { generate_scalar(i, &data.scalars[i]); if (i) { - rustsecp256k1_v0_9_2_gej_double_var(&data.pubkeys_gej[i], &data.pubkeys_gej[i - 1], NULL); - rustsecp256k1_v0_9_2_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]); + rustsecp256k1_v0_10_0_gej_double_var(&data.pubkeys_gej[i], &data.pubkeys_gej[i - 1], NULL); + rustsecp256k1_v0_10_0_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]); } } - rustsecp256k1_v0_9_2_ge_set_all_gej_var(data.pubkeys, data.pubkeys_gej, POINTS); + rustsecp256k1_v0_10_0_ge_set_all_gej_var(data.pubkeys, data.pubkeys_gej, POINTS); print_output_table_header_row(); @@ -353,9 +353,9 @@ int main(int argc, char **argv) { } if (data.scratch != NULL) { - rustsecp256k1_v0_9_2_scratch_space_destroy(data.ctx, data.scratch); + rustsecp256k1_v0_10_0_scratch_space_destroy(data.ctx, data.scratch); } - rustsecp256k1_v0_9_2_context_destroy(data.ctx); + rustsecp256k1_v0_10_0_context_destroy(data.ctx); free(data.scalars); free(data.pubkeys); free(data.pubkeys_gej); diff --git a/secp256k1-sys/depend/secp256k1/src/bench_internal.c b/secp256k1-sys/depend/secp256k1/src/bench_internal.c index af8d3b8e3..913bb9c0b 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_internal.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_internal.c @@ -14,15 +14,33 @@ #include "field_impl.h" #include "group_impl.h" #include "scalar_impl.h" -#include "ecmult_const_impl.h" #include "ecmult_impl.h" #include "bench.h" +static void help(int default_iters) { + printf("Benchmarks various internal routines.\n"); + printf("\n"); + printf("The default number of iterations for each benchmark is %d. 
This can be\n", default_iters); + printf("customized using the SECP256K1_BENCH_ITERS environment variable.\n"); + printf("\n"); + printf("Usage: ./bench_internal [args]\n"); + printf("By default, all benchmarks will be run.\n"); + printf("args:\n"); + printf(" help : display this help and exit\n"); + printf(" scalar : all scalar operations (add, half, inverse, mul, negate, split)\n"); + printf(" field : all field operations (half, inverse, issquare, mul, normalize, sqr, sqrt)\n"); + printf(" group : all group operations (add, double, to_affine)\n"); + printf(" ecmult : all point multiplication operations (ecmult_wnaf) \n"); + printf(" hash : all hash algorithms (hmac, rng6979, sha256)\n"); + printf(" context : all context object operations (context_create)\n"); + printf("\n"); +} + typedef struct { - rustsecp256k1_v0_9_2_scalar scalar[2]; - rustsecp256k1_v0_9_2_fe fe[4]; - rustsecp256k1_v0_9_2_ge ge[2]; - rustsecp256k1_v0_9_2_gej gej[2]; + rustsecp256k1_v0_10_0_scalar scalar[2]; + rustsecp256k1_v0_10_0_fe fe[4]; + rustsecp256k1_v0_10_0_ge ge[2]; + rustsecp256k1_v0_10_0_gej gej[2]; unsigned char data[64]; int wnaf[256]; } bench_inv; @@ -63,18 +81,18 @@ static void bench_setup(void* arg) { } }; - rustsecp256k1_v0_9_2_scalar_set_b32(&data->scalar[0], init[0], NULL); - rustsecp256k1_v0_9_2_scalar_set_b32(&data->scalar[1], init[1], NULL); - rustsecp256k1_v0_9_2_fe_set_b32_limit(&data->fe[0], init[0]); - rustsecp256k1_v0_9_2_fe_set_b32_limit(&data->fe[1], init[1]); - rustsecp256k1_v0_9_2_fe_set_b32_limit(&data->fe[2], init[2]); - rustsecp256k1_v0_9_2_fe_set_b32_limit(&data->fe[3], init[3]); - CHECK(rustsecp256k1_v0_9_2_ge_set_xo_var(&data->ge[0], &data->fe[0], 0)); - CHECK(rustsecp256k1_v0_9_2_ge_set_xo_var(&data->ge[1], &data->fe[1], 1)); - rustsecp256k1_v0_9_2_gej_set_ge(&data->gej[0], &data->ge[0]); - rustsecp256k1_v0_9_2_gej_rescale(&data->gej[0], &data->fe[2]); - rustsecp256k1_v0_9_2_gej_set_ge(&data->gej[1], &data->ge[1]); - rustsecp256k1_v0_9_2_gej_rescale(&data->gej[1], &data->fe[3]); + rustsecp256k1_v0_10_0_scalar_set_b32(&data->scalar[0], init[0], NULL); + rustsecp256k1_v0_10_0_scalar_set_b32(&data->scalar[1], init[1], NULL); + rustsecp256k1_v0_10_0_fe_set_b32_limit(&data->fe[0], init[0]); + rustsecp256k1_v0_10_0_fe_set_b32_limit(&data->fe[1], init[1]); + rustsecp256k1_v0_10_0_fe_set_b32_limit(&data->fe[2], init[2]); + rustsecp256k1_v0_10_0_fe_set_b32_limit(&data->fe[3], init[3]); + CHECK(rustsecp256k1_v0_10_0_ge_set_xo_var(&data->ge[0], &data->fe[0], 0)); + CHECK(rustsecp256k1_v0_10_0_ge_set_xo_var(&data->ge[1], &data->fe[1], 1)); + rustsecp256k1_v0_10_0_gej_set_ge(&data->gej[0], &data->ge[0]); + rustsecp256k1_v0_10_0_gej_rescale(&data->gej[0], &data->fe[2]); + rustsecp256k1_v0_10_0_gej_set_ge(&data->gej[1], &data->ge[1]); + rustsecp256k1_v0_10_0_gej_rescale(&data->gej[1], &data->fe[3]); memcpy(data->data, init[0], 32); memcpy(data->data + 32, init[1], 32); } @@ -84,7 +102,7 @@ static void bench_scalar_add(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - j += rustsecp256k1_v0_9_2_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + j += rustsecp256k1_v0_10_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(j <= iters); } @@ -94,8 +112,20 @@ static void bench_scalar_negate(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_scalar_negate(&data->scalar[0], &data->scalar[0]); + rustsecp256k1_v0_10_0_scalar_negate(&data->scalar[0], 
&data->scalar[0]); + } +} + +static void bench_scalar_half(void* arg, int iters) { + int i; + bench_inv *data = (bench_inv*)arg; + rustsecp256k1_v0_10_0_scalar s = data->scalar[0]; + + for (i = 0; i < iters; i++) { + rustsecp256k1_v0_10_0_scalar_half(&s, &s); } + + data->scalar[0] = s; } static void bench_scalar_mul(void* arg, int iters) { @@ -103,18 +133,18 @@ static void bench_scalar_mul(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_scalar_mul(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + rustsecp256k1_v0_10_0_scalar_mul(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } } static void bench_scalar_split(void* arg, int iters) { int i, j = 0; bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_9_2_scalar tmp; + rustsecp256k1_v0_10_0_scalar tmp; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_scalar_split_lambda(&tmp, &data->scalar[1], &data->scalar[0]); - j += rustsecp256k1_v0_9_2_scalar_add(&data->scalar[0], &tmp, &data->scalar[1]); + rustsecp256k1_v0_10_0_scalar_split_lambda(&tmp, &data->scalar[1], &data->scalar[0]); + j += rustsecp256k1_v0_10_0_scalar_add(&data->scalar[0], &tmp, &data->scalar[1]); } CHECK(j <= iters); } @@ -124,8 +154,8 @@ static void bench_scalar_inverse(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_scalar_inverse(&data->scalar[0], &data->scalar[0]); - j += rustsecp256k1_v0_9_2_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + rustsecp256k1_v0_10_0_scalar_inverse(&data->scalar[0], &data->scalar[0]); + j += rustsecp256k1_v0_10_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(j <= iters); } @@ -135,8 +165,8 @@ static void bench_scalar_inverse_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_scalar_inverse_var(&data->scalar[0], &data->scalar[0]); - j += rustsecp256k1_v0_9_2_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + rustsecp256k1_v0_10_0_scalar_inverse_var(&data->scalar[0], &data->scalar[0]); + j += rustsecp256k1_v0_10_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(j <= iters); } @@ -146,7 +176,7 @@ static void bench_field_half(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_fe_half(&data->fe[0]); + rustsecp256k1_v0_10_0_fe_half(&data->fe[0]); } } @@ -155,7 +185,7 @@ static void bench_field_normalize(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_fe_normalize(&data->fe[0]); + rustsecp256k1_v0_10_0_fe_normalize(&data->fe[0]); } } @@ -164,7 +194,7 @@ static void bench_field_normalize_weak(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_fe_normalize_weak(&data->fe[0]); + rustsecp256k1_v0_10_0_fe_normalize_weak(&data->fe[0]); } } @@ -173,7 +203,7 @@ static void bench_field_mul(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_fe_mul(&data->fe[0], &data->fe[0], &data->fe[1]); + rustsecp256k1_v0_10_0_fe_mul(&data->fe[0], &data->fe[0], &data->fe[1]); } } @@ -182,7 +212,7 @@ static void bench_field_sqr(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_fe_sqr(&data->fe[0], &data->fe[0]); + rustsecp256k1_v0_10_0_fe_sqr(&data->fe[0], &data->fe[0]); } } @@ 
-191,8 +221,8 @@ static void bench_field_inverse(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_fe_inv(&data->fe[0], &data->fe[0]); - rustsecp256k1_v0_9_2_fe_add(&data->fe[0], &data->fe[1]); + rustsecp256k1_v0_10_0_fe_inv(&data->fe[0], &data->fe[0]); + rustsecp256k1_v0_10_0_fe_add(&data->fe[0], &data->fe[1]); } } @@ -201,20 +231,20 @@ static void bench_field_inverse_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_fe_inv_var(&data->fe[0], &data->fe[0]); - rustsecp256k1_v0_9_2_fe_add(&data->fe[0], &data->fe[1]); + rustsecp256k1_v0_10_0_fe_inv_var(&data->fe[0], &data->fe[0]); + rustsecp256k1_v0_10_0_fe_add(&data->fe[0], &data->fe[1]); } } static void bench_field_sqrt(void* arg, int iters) { int i, j = 0; bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_9_2_fe t; + rustsecp256k1_v0_10_0_fe t; for (i = 0; i < iters; i++) { t = data->fe[0]; - j += rustsecp256k1_v0_9_2_fe_sqrt(&data->fe[0], &t); - rustsecp256k1_v0_9_2_fe_add(&data->fe[0], &data->fe[1]); + j += rustsecp256k1_v0_10_0_fe_sqrt(&data->fe[0], &t); + rustsecp256k1_v0_10_0_fe_add(&data->fe[0], &data->fe[1]); } CHECK(j <= iters); } @@ -222,12 +252,12 @@ static void bench_field_sqrt(void* arg, int iters) { static void bench_field_is_square_var(void* arg, int iters) { int i, j = 0; bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_9_2_fe t = data->fe[0]; + rustsecp256k1_v0_10_0_fe t = data->fe[0]; for (i = 0; i < iters; i++) { - j += rustsecp256k1_v0_9_2_fe_is_square_var(&t); - rustsecp256k1_v0_9_2_fe_add(&t, &data->fe[1]); - rustsecp256k1_v0_9_2_fe_normalize_var(&t); + j += rustsecp256k1_v0_10_0_fe_is_square_var(&t); + rustsecp256k1_v0_10_0_fe_add(&t, &data->fe[1]); + rustsecp256k1_v0_10_0_fe_normalize_var(&t); } CHECK(j <= iters); } @@ -237,7 +267,7 @@ static void bench_group_double_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_gej_double_var(&data->gej[0], &data->gej[0], NULL); + rustsecp256k1_v0_10_0_gej_double_var(&data->gej[0], &data->gej[0], NULL); } } @@ -246,7 +276,7 @@ static void bench_group_add_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_gej_add_var(&data->gej[0], &data->gej[0], &data->gej[1], NULL); + rustsecp256k1_v0_10_0_gej_add_var(&data->gej[0], &data->gej[0], &data->gej[1], NULL); } } @@ -255,7 +285,7 @@ static void bench_group_add_affine(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_gej_add_ge(&data->gej[0], &data->gej[0], &data->ge[1]); + rustsecp256k1_v0_10_0_gej_add_ge(&data->gej[0], &data->gej[0], &data->ge[1]); } } @@ -264,7 +294,7 @@ static void bench_group_add_affine_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_gej_add_ge_var(&data->gej[0], &data->gej[0], &data->ge[1], NULL); + rustsecp256k1_v0_10_0_gej_add_ge_var(&data->gej[0], &data->gej[0], &data->ge[1], NULL); } } @@ -273,7 +303,7 @@ static void bench_group_add_zinv_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_gej_add_zinv_var(&data->gej[0], &data->gej[0], &data->ge[1], &data->gej[0].y); + rustsecp256k1_v0_10_0_gej_add_zinv_var(&data->gej[0], &data->gej[0], &data->ge[1], &data->gej[0].y); } } @@ -282,18 +312,18 @@ static void bench_group_to_affine_var(void* arg, int iters) { 
bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; ++i) { - rustsecp256k1_v0_9_2_ge_set_gej_var(&data->ge[1], &data->gej[0]); + rustsecp256k1_v0_10_0_ge_set_gej_var(&data->ge[1], &data->gej[0]); /* Use the output affine X/Y coordinates to vary the input X/Y/Z coordinates. Note that the resulting coordinates will generally not correspond to a point on the curve, but this is not a problem for the code being benchmarked here. Adding and normalizing have less overhead than EC operations (which could guarantee the point remains on the curve). */ - rustsecp256k1_v0_9_2_fe_add(&data->gej[0].x, &data->ge[1].y); - rustsecp256k1_v0_9_2_fe_add(&data->gej[0].y, &data->fe[2]); - rustsecp256k1_v0_9_2_fe_add(&data->gej[0].z, &data->ge[1].x); - rustsecp256k1_v0_9_2_fe_normalize_var(&data->gej[0].x); - rustsecp256k1_v0_9_2_fe_normalize_var(&data->gej[0].y); - rustsecp256k1_v0_9_2_fe_normalize_var(&data->gej[0].z); + rustsecp256k1_v0_10_0_fe_add(&data->gej[0].x, &data->ge[1].y); + rustsecp256k1_v0_10_0_fe_add(&data->gej[0].y, &data->fe[2]); + rustsecp256k1_v0_10_0_fe_add(&data->gej[0].z, &data->ge[1].x); + rustsecp256k1_v0_10_0_fe_normalize_var(&data->gej[0].x); + rustsecp256k1_v0_10_0_fe_normalize_var(&data->gej[0].y); + rustsecp256k1_v0_10_0_fe_normalize_var(&data->gej[0].z); } } @@ -302,20 +332,8 @@ static void bench_ecmult_wnaf(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - bits += rustsecp256k1_v0_9_2_ecmult_wnaf(data->wnaf, 256, &data->scalar[0], WINDOW_A); - overflow += rustsecp256k1_v0_9_2_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); - } - CHECK(overflow >= 0); - CHECK(bits <= 256*iters); -} - -static void bench_wnaf_const(void* arg, int iters) { - int i, bits = 0, overflow = 0; - bench_inv *data = (bench_inv*)arg; - - for (i = 0; i < iters; i++) { - bits += rustsecp256k1_v0_9_2_wnaf_const(data->wnaf, &data->scalar[0], WINDOW_A, 256); - overflow += rustsecp256k1_v0_9_2_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + bits += rustsecp256k1_v0_10_0_ecmult_wnaf(data->wnaf, 256, &data->scalar[0], WINDOW_A); + overflow += rustsecp256k1_v0_10_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(overflow >= 0); CHECK(bits <= 256*iters); @@ -324,35 +342,35 @@ static void bench_wnaf_const(void* arg, int iters) { static void bench_sha256(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_9_2_sha256 sha; + rustsecp256k1_v0_10_0_sha256 sha; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_sha256_initialize(&sha); - rustsecp256k1_v0_9_2_sha256_write(&sha, data->data, 32); - rustsecp256k1_v0_9_2_sha256_finalize(&sha, data->data); + rustsecp256k1_v0_10_0_sha256_initialize(&sha); + rustsecp256k1_v0_10_0_sha256_write(&sha, data->data, 32); + rustsecp256k1_v0_10_0_sha256_finalize(&sha, data->data); } } static void bench_hmac_sha256(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_9_2_hmac_sha256 hmac; + rustsecp256k1_v0_10_0_hmac_sha256 hmac; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_hmac_sha256_initialize(&hmac, data->data, 32); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hmac, data->data, 32); - rustsecp256k1_v0_9_2_hmac_sha256_finalize(&hmac, data->data); + rustsecp256k1_v0_10_0_hmac_sha256_initialize(&hmac, data->data, 32); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hmac, data->data, 32); + rustsecp256k1_v0_10_0_hmac_sha256_finalize(&hmac, data->data); } } static void bench_rfc6979_hmac_sha256(void* arg, int iters) { 
int i; bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256 rng; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_initialize(&rng, data->data, 64); - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_generate(&rng, data->data, 32); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_initialize(&rng, data->data, 64); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_generate(&rng, data->data, 32); } } @@ -360,16 +378,28 @@ static void bench_context(void* arg, int iters) { int i; (void)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_context_destroy(rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE)); + rustsecp256k1_v0_10_0_context_destroy(rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE)); } } int main(int argc, char **argv) { bench_inv data; - int iters = get_iters(20000); + int default_iters = 20000; + int iters = get_iters(default_iters); int d = argc == 1; /* default */ + + if (argc > 1) { + if (have_flag(argc, argv, "-h") + || have_flag(argc, argv, "--help") + || have_flag(argc, argv, "help")) { + help(default_iters); + return 0; + } + } + print_output_table_header_row(); + if (d || have_flag(argc, argv, "scalar") || have_flag(argc, argv, "half")) run_benchmark("scalar_half", bench_scalar_half, bench_setup, NULL, &data, 10, iters*100); if (d || have_flag(argc, argv, "scalar") || have_flag(argc, argv, "add")) run_benchmark("scalar_add", bench_scalar_add, bench_setup, NULL, &data, 10, iters*100); if (d || have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, iters*100); if (d || have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, iters*10); @@ -394,7 +424,6 @@ int main(int argc, char **argv) { if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_zinv_var", bench_group_add_zinv_var, bench_setup, NULL, &data, 10, iters*10); if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "to_affine")) run_benchmark("group_to_affine_var", bench_group_to_affine_var, bench_setup, NULL, &data, 10, iters); - if (d || have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, iters); if (d || have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("ecmult_wnaf", bench_ecmult_wnaf, bench_setup, NULL, &data, 10, iters); if (d || have_flag(argc, argv, "hash") || have_flag(argc, argv, "sha256")) run_benchmark("hash_sha256", bench_sha256, bench_setup, NULL, &data, 10, iters); diff --git a/secp256k1-sys/depend/secp256k1/src/ctime_tests.c b/secp256k1-sys/depend/secp256k1/src/ctime_tests.c index f8e54cb54..4149e0ef1 100644 --- a/secp256k1-sys/depend/secp256k1/src/ctime_tests.c +++ b/secp256k1-sys/depend/secp256k1/src/ctime_tests.c @@ -15,15 +15,15 @@ #endif #ifdef ENABLE_MODULE_ECDH -# include "../include/rustsecp256k1_v0_9_2_ecdh.h" +# include "../include/rustsecp256k1_v0_10_0_ecdh.h" #endif #ifdef ENABLE_MODULE_RECOVERY -# include "../include/rustsecp256k1_v0_9_2_recovery.h" +# include "../include/rustsecp256k1_v0_10_0_recovery.h" #endif #ifdef ENABLE_MODULE_EXTRAKEYS -# include "../include/rustsecp256k1_v0_9_2_extrakeys.h" +# include "../include/rustsecp256k1_v0_10_0_extrakeys.h" #endif #ifdef ENABLE_MODULE_SCHNORRSIG @@ -34,10 +34,10 @@ #include 
"../include/secp256k1_ellswift.h" #endif -static void run_tests(rustsecp256k1_v0_9_2_context *ctx, unsigned char *key); +static void run_tests(rustsecp256k1_v0_10_0_context *ctx, unsigned char *key); int main(void) { - rustsecp256k1_v0_9_2_context* ctx; + rustsecp256k1_v0_10_0_context* ctx; unsigned char key[32]; int ret, i; @@ -46,7 +46,7 @@ int main(void) { fprintf(stderr, "Usage: libtool --mode=execute valgrind ./ctime_tests\n"); return 1; } - ctx = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_DECLASSIFY); + ctx = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_DECLASSIFY); /** In theory, testing with a single secret input should be sufficient: * If control flow depended on secrets the tool would generate an error. */ @@ -59,17 +59,17 @@ int main(void) { /* Test context randomisation. Do this last because it leaves the context * tainted. */ SECP256K1_CHECKMEM_UNDEFINE(key, 32); - ret = rustsecp256k1_v0_9_2_context_randomize(ctx, key); + ret = rustsecp256k1_v0_10_0_context_randomize(ctx, key); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret); - rustsecp256k1_v0_9_2_context_destroy(ctx); + rustsecp256k1_v0_10_0_context_destroy(ctx); return 0; } -static void run_tests(rustsecp256k1_v0_9_2_context *ctx, unsigned char *key) { - rustsecp256k1_v0_9_2_ecdsa_signature signature; - rustsecp256k1_v0_9_2_pubkey pubkey; +static void run_tests(rustsecp256k1_v0_10_0_context *ctx, unsigned char *key) { + rustsecp256k1_v0_10_0_ecdsa_signature signature; + rustsecp256k1_v0_10_0_pubkey pubkey; size_t siglen = 74; size_t outputlen = 33; int i; @@ -78,11 +78,11 @@ static void run_tests(rustsecp256k1_v0_9_2_context *ctx, unsigned char *key) { unsigned char sig[74]; unsigned char spubkey[33]; #ifdef ENABLE_MODULE_RECOVERY - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature recoverable_signature; + rustsecp256k1_v0_10_0_ecdsa_recoverable_signature recoverable_signature; int recid; #endif #ifdef ENABLE_MODULE_EXTRAKEYS - rustsecp256k1_v0_9_2_keypair keypair; + rustsecp256k1_v0_10_0_keypair keypair; #endif #ifdef ENABLE_MODULE_ELLSWIFT unsigned char ellswift[64]; @@ -95,24 +95,24 @@ static void run_tests(rustsecp256k1_v0_9_2_context *ctx, unsigned char *key) { /* Test keygen. */ SECP256K1_CHECKMEM_UNDEFINE(key, 32); - ret = rustsecp256k1_v0_9_2_ec_pubkey_create(ctx, &pubkey, key); - SECP256K1_CHECKMEM_DEFINE(&pubkey, sizeof(rustsecp256k1_v0_9_2_pubkey)); + ret = rustsecp256k1_v0_10_0_ec_pubkey_create(ctx, &pubkey, key); + SECP256K1_CHECKMEM_DEFINE(&pubkey, sizeof(rustsecp256k1_v0_10_0_pubkey)); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(ctx, spubkey, &outputlen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(ctx, spubkey, &outputlen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); /* Test signing. */ SECP256K1_CHECKMEM_UNDEFINE(key, 32); - ret = rustsecp256k1_v0_9_2_ecdsa_sign(ctx, &signature, msg, key, NULL, NULL); - SECP256K1_CHECKMEM_DEFINE(&signature, sizeof(rustsecp256k1_v0_9_2_ecdsa_signature)); + ret = rustsecp256k1_v0_10_0_ecdsa_sign(ctx, &signature, msg, key, NULL, NULL); + SECP256K1_CHECKMEM_DEFINE(&signature, sizeof(rustsecp256k1_v0_10_0_ecdsa_signature)); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature)); #ifdef ENABLE_MODULE_ECDH /* Test ECDH. 
*/ SECP256K1_CHECKMEM_UNDEFINE(key, 32); - ret = rustsecp256k1_v0_9_2_ecdh(ctx, msg, &pubkey, key, NULL, NULL); + ret = rustsecp256k1_v0_10_0_ecdh(ctx, msg, &pubkey, key, NULL, NULL); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret == 1); #endif @@ -120,87 +120,87 @@ static void run_tests(rustsecp256k1_v0_9_2_context *ctx, unsigned char *key) { #ifdef ENABLE_MODULE_RECOVERY /* Test signing a recoverable signature. */ SECP256K1_CHECKMEM_UNDEFINE(key, 32); - ret = rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(ctx, &recoverable_signature, msg, key, NULL, NULL); + ret = rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(ctx, &recoverable_signature, msg, key, NULL, NULL); SECP256K1_CHECKMEM_DEFINE(&recoverable_signature, sizeof(recoverable_signature)); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &recoverable_signature)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &recoverable_signature)); CHECK(recid >= 0 && recid <= 3); #endif SECP256K1_CHECKMEM_UNDEFINE(key, 32); - ret = rustsecp256k1_v0_9_2_ec_seckey_verify(ctx, key); + ret = rustsecp256k1_v0_10_0_ec_seckey_verify(ctx, key); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret == 1); SECP256K1_CHECKMEM_UNDEFINE(key, 32); - ret = rustsecp256k1_v0_9_2_ec_seckey_negate(ctx, key); + ret = rustsecp256k1_v0_10_0_ec_seckey_negate(ctx, key); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret == 1); SECP256K1_CHECKMEM_UNDEFINE(key, 32); SECP256K1_CHECKMEM_UNDEFINE(msg, 32); - ret = rustsecp256k1_v0_9_2_ec_seckey_tweak_add(ctx, key, msg); + ret = rustsecp256k1_v0_10_0_ec_seckey_tweak_add(ctx, key, msg); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret == 1); SECP256K1_CHECKMEM_UNDEFINE(key, 32); SECP256K1_CHECKMEM_UNDEFINE(msg, 32); - ret = rustsecp256k1_v0_9_2_ec_seckey_tweak_mul(ctx, key, msg); + ret = rustsecp256k1_v0_10_0_ec_seckey_tweak_mul(ctx, key, msg); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret == 1); /* Test keypair_create and keypair_xonly_tweak_add. 
*/ #ifdef ENABLE_MODULE_EXTRAKEYS SECP256K1_CHECKMEM_UNDEFINE(key, 32); - ret = rustsecp256k1_v0_9_2_keypair_create(ctx, &keypair, key); + ret = rustsecp256k1_v0_10_0_keypair_create(ctx, &keypair, key); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret == 1); /* The tweak is not treated as a secret in keypair_tweak_add */ SECP256K1_CHECKMEM_DEFINE(msg, 32); - ret = rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(ctx, &keypair, msg); + ret = rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(ctx, &keypair, msg); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret == 1); SECP256K1_CHECKMEM_UNDEFINE(key, 32); SECP256K1_CHECKMEM_UNDEFINE(&keypair, sizeof(keypair)); - ret = rustsecp256k1_v0_9_2_keypair_sec(ctx, key, &keypair); + ret = rustsecp256k1_v0_10_0_keypair_sec(ctx, key, &keypair); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret == 1); #endif #ifdef ENABLE_MODULE_SCHNORRSIG SECP256K1_CHECKMEM_UNDEFINE(key, 32); - ret = rustsecp256k1_v0_9_2_keypair_create(ctx, &keypair, key); + ret = rustsecp256k1_v0_10_0_keypair_create(ctx, &keypair, key); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret == 1); - ret = rustsecp256k1_v0_9_2_schnorrsig_sign32(ctx, sig, msg, &keypair, NULL); + ret = rustsecp256k1_v0_10_0_schnorrsig_sign32(ctx, sig, msg, &keypair, NULL); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret == 1); #endif #ifdef ENABLE_MODULE_ELLSWIFT SECP256K1_CHECKMEM_UNDEFINE(key, 32); - ret = rustsecp256k1_v0_9_2_ellswift_create(ctx, ellswift, key, NULL); + ret = rustsecp256k1_v0_10_0_ellswift_create(ctx, ellswift, key, NULL); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret == 1); SECP256K1_CHECKMEM_UNDEFINE(key, 32); - ret = rustsecp256k1_v0_9_2_ellswift_create(ctx, ellswift, key, ellswift); + ret = rustsecp256k1_v0_10_0_ellswift_create(ctx, ellswift, key, ellswift); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret == 1); for (i = 0; i < 2; i++) { SECP256K1_CHECKMEM_UNDEFINE(key, 32); SECP256K1_CHECKMEM_DEFINE(&ellswift, sizeof(ellswift)); - ret = rustsecp256k1_v0_9_2_ellswift_xdh(ctx, msg, ellswift, ellswift, key, i, rustsecp256k1_v0_9_2_ellswift_xdh_hash_function_bip324, NULL); + ret = rustsecp256k1_v0_10_0_ellswift_xdh(ctx, msg, ellswift, ellswift, key, i, rustsecp256k1_v0_10_0_ellswift_xdh_hash_function_bip324, NULL); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret == 1); SECP256K1_CHECKMEM_UNDEFINE(key, 32); SECP256K1_CHECKMEM_DEFINE(&ellswift, sizeof(ellswift)); - ret = rustsecp256k1_v0_9_2_ellswift_xdh(ctx, msg, ellswift, ellswift, key, i, rustsecp256k1_v0_9_2_ellswift_xdh_hash_function_prefix, (void *)prefix); + ret = rustsecp256k1_v0_10_0_ellswift_xdh(ctx, msg, ellswift, ellswift, key, i, rustsecp256k1_v0_10_0_ellswift_xdh_hash_function_prefix, (void *)prefix); SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret)); CHECK(ret == 1); } diff --git a/secp256k1-sys/depend/secp256k1/src/ecdsa.h b/secp256k1-sys/depend/secp256k1/src/ecdsa.h index 79a009381..79a6f3071 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecdsa.h +++ b/secp256k1-sys/depend/secp256k1/src/ecdsa.h @@ -13,9 +13,9 @@ #include "group.h" #include "ecmult.h" -static int rustsecp256k1_v0_9_2_ecdsa_sig_parse(rustsecp256k1_v0_9_2_scalar *r, rustsecp256k1_v0_9_2_scalar *s, const unsigned char *sig, size_t size); -static int rustsecp256k1_v0_9_2_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *s); -static int rustsecp256k1_v0_9_2_ecdsa_sig_verify(const rustsecp256k1_v0_9_2_scalar* r, const 
rustsecp256k1_v0_9_2_scalar* s, const rustsecp256k1_v0_9_2_ge *pubkey, const rustsecp256k1_v0_9_2_scalar *message); -static int rustsecp256k1_v0_9_2_ecdsa_sig_sign(const rustsecp256k1_v0_9_2_ecmult_gen_context *ctx, rustsecp256k1_v0_9_2_scalar* r, rustsecp256k1_v0_9_2_scalar* s, const rustsecp256k1_v0_9_2_scalar *seckey, const rustsecp256k1_v0_9_2_scalar *message, const rustsecp256k1_v0_9_2_scalar *nonce, int *recid); +static int rustsecp256k1_v0_10_0_ecdsa_sig_parse(rustsecp256k1_v0_10_0_scalar *r, rustsecp256k1_v0_10_0_scalar *s, const unsigned char *sig, size_t size); +static int rustsecp256k1_v0_10_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *s); +static int rustsecp256k1_v0_10_0_ecdsa_sig_verify(const rustsecp256k1_v0_10_0_scalar* r, const rustsecp256k1_v0_10_0_scalar* s, const rustsecp256k1_v0_10_0_ge *pubkey, const rustsecp256k1_v0_10_0_scalar *message); +static int rustsecp256k1_v0_10_0_ecdsa_sig_sign(const rustsecp256k1_v0_10_0_ecmult_gen_context *ctx, rustsecp256k1_v0_10_0_scalar* r, rustsecp256k1_v0_10_0_scalar* s, const rustsecp256k1_v0_10_0_scalar *seckey, const rustsecp256k1_v0_10_0_scalar *message, const rustsecp256k1_v0_10_0_scalar *nonce, int *recid); #endif /* SECP256K1_ECDSA_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h b/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h index 9e922e88c..0bede02ad 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h @@ -16,24 +16,24 @@ #include "ecdsa.h" /** Group order for secp256k1 defined as 'n' in "Standards for Efficient Cryptography" (SEC2) 2.7.1 - * $ sage -c 'load("rustsecp256k1_v0_9_2_params.sage"); print(hex(N))' + * $ sage -c 'load("rustsecp256k1_v0_10_0_params.sage"); print(hex(N))' * 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 */ -static const rustsecp256k1_v0_9_2_fe rustsecp256k1_v0_9_2_ecdsa_const_order_as_fe = SECP256K1_FE_CONST( +static const rustsecp256k1_v0_10_0_fe rustsecp256k1_v0_10_0_ecdsa_const_order_as_fe = SECP256K1_FE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xBAAEDCE6UL, 0xAF48A03BUL, 0xBFD25E8CUL, 0xD0364141UL ); /** Difference between field and order, values 'p' and 'n' values defined in * "Standards for Efficient Cryptography" (SEC2) 2.7.1. - * $ sage -c 'load("rustsecp256k1_v0_9_2_params.sage"); print(hex(P-N))' + * $ sage -c 'load("rustsecp256k1_v0_10_0_params.sage"); print(hex(P-N))' * 0x14551231950b75fc4402da1722fc9baee */ -static const rustsecp256k1_v0_9_2_fe rustsecp256k1_v0_9_2_ecdsa_const_p_minus_order = SECP256K1_FE_CONST( +static const rustsecp256k1_v0_10_0_fe rustsecp256k1_v0_10_0_ecdsa_const_p_minus_order = SECP256K1_FE_CONST( 0, 0, 0, 1, 0x45512319UL, 0x50B75FC4UL, 0x402DA172UL, 0x2FC9BAEEUL ); -static int rustsecp256k1_v0_9_2_der_read_len(size_t *len, const unsigned char **sigp, const unsigned char *sigend) { +static int rustsecp256k1_v0_10_0_der_read_len(size_t *len, const unsigned char **sigp, const unsigned char *sigend) { size_t lenleft; unsigned char b1; VERIFY_CHECK(len != NULL); @@ -66,8 +66,7 @@ static int rustsecp256k1_v0_9_2_der_read_len(size_t *len, const unsigned char ** } if (lenleft > sizeof(size_t)) { /* The resulting length would exceed the range of a size_t, so - * certainly longer than the passed array size. - */ + * it is certainly longer than the passed array size. 
*/ return 0; } while (lenleft > 0) { @@ -76,7 +75,9 @@ static int rustsecp256k1_v0_9_2_der_read_len(size_t *len, const unsigned char ** lenleft--; } if (*len > (size_t)(sigend - *sigp)) { - /* Result exceeds the length of the passed array. */ + /* Result exceeds the length of the passed array. + (Checking this is the responsibility of the caller but it + can't hurt do it here, too.) */ return 0; } if (*len < 128) { @@ -86,7 +87,7 @@ static int rustsecp256k1_v0_9_2_der_read_len(size_t *len, const unsigned char ** return 1; } -static int rustsecp256k1_v0_9_2_der_parse_integer(rustsecp256k1_v0_9_2_scalar *r, const unsigned char **sig, const unsigned char *sigend) { +static int rustsecp256k1_v0_10_0_der_parse_integer(rustsecp256k1_v0_10_0_scalar *r, const unsigned char **sig, const unsigned char *sigend) { int overflow = 0; unsigned char ra[32] = {0}; size_t rlen; @@ -96,7 +97,7 @@ static int rustsecp256k1_v0_9_2_der_parse_integer(rustsecp256k1_v0_9_2_scalar *r return 0; } (*sig)++; - if (rustsecp256k1_v0_9_2_der_read_len(&rlen, sig, sigend) == 0) { + if (rustsecp256k1_v0_10_0_der_read_len(&rlen, sig, sigend) == 0) { return 0; } if (rlen == 0 || rlen > (size_t)(sigend - *sig)) { @@ -128,23 +129,23 @@ static int rustsecp256k1_v0_9_2_der_parse_integer(rustsecp256k1_v0_9_2_scalar *r } if (!overflow) { if (rlen) memcpy(ra + 32 - rlen, *sig, rlen); - rustsecp256k1_v0_9_2_scalar_set_b32(r, ra, &overflow); + rustsecp256k1_v0_10_0_scalar_set_b32(r, ra, &overflow); } if (overflow) { - rustsecp256k1_v0_9_2_scalar_set_int(r, 0); + rustsecp256k1_v0_10_0_scalar_set_int(r, 0); } (*sig) += rlen; return 1; } -static int rustsecp256k1_v0_9_2_ecdsa_sig_parse(rustsecp256k1_v0_9_2_scalar *rr, rustsecp256k1_v0_9_2_scalar *rs, const unsigned char *sig, size_t size) { +static int rustsecp256k1_v0_10_0_ecdsa_sig_parse(rustsecp256k1_v0_10_0_scalar *rr, rustsecp256k1_v0_10_0_scalar *rs, const unsigned char *sig, size_t size) { const unsigned char *sigend = sig + size; size_t rlen; if (sig == sigend || *(sig++) != 0x30) { /* The encoding doesn't start with a constructed sequence (X.690-0207 8.9.1). 
*/ return 0; } - if (rustsecp256k1_v0_9_2_der_read_len(&rlen, &sig, sigend) == 0) { + if (rustsecp256k1_v0_10_0_der_read_len(&rlen, &sig, sigend) == 0) { return 0; } if (rlen != (size_t)(sigend - sig)) { @@ -152,10 +153,10 @@ static int rustsecp256k1_v0_9_2_ecdsa_sig_parse(rustsecp256k1_v0_9_2_scalar *rr, return 0; } - if (!rustsecp256k1_v0_9_2_der_parse_integer(rr, &sig, sigend)) { + if (!rustsecp256k1_v0_10_0_der_parse_integer(rr, &sig, sigend)) { return 0; } - if (!rustsecp256k1_v0_9_2_der_parse_integer(rs, &sig, sigend)) { + if (!rustsecp256k1_v0_10_0_der_parse_integer(rs, &sig, sigend)) { return 0; } @@ -167,12 +168,12 @@ static int rustsecp256k1_v0_9_2_ecdsa_sig_parse(rustsecp256k1_v0_9_2_scalar *rr, return 1; } -static int rustsecp256k1_v0_9_2_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_9_2_scalar* ar, const rustsecp256k1_v0_9_2_scalar* as) { +static int rustsecp256k1_v0_10_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_10_0_scalar* ar, const rustsecp256k1_v0_10_0_scalar* as) { unsigned char r[33] = {0}, s[33] = {0}; unsigned char *rp = r, *sp = s; size_t lenR = 33, lenS = 33; - rustsecp256k1_v0_9_2_scalar_get_b32(&r[1], ar); - rustsecp256k1_v0_9_2_scalar_get_b32(&s[1], as); + rustsecp256k1_v0_10_0_scalar_get_b32(&r[1], ar); + rustsecp256k1_v0_10_0_scalar_get_b32(&s[1], as); while (lenR > 1 && rp[0] == 0 && rp[1] < 0x80) { lenR--; rp++; } while (lenS > 1 && sp[0] == 0 && sp[1] < 0x80) { lenS--; sp++; } if (*size < 6+lenS+lenR) { @@ -191,43 +192,43 @@ static int rustsecp256k1_v0_9_2_ecdsa_sig_serialize(unsigned char *sig, size_t * return 1; } -static int rustsecp256k1_v0_9_2_ecdsa_sig_verify(const rustsecp256k1_v0_9_2_scalar *sigr, const rustsecp256k1_v0_9_2_scalar *sigs, const rustsecp256k1_v0_9_2_ge *pubkey, const rustsecp256k1_v0_9_2_scalar *message) { +static int rustsecp256k1_v0_10_0_ecdsa_sig_verify(const rustsecp256k1_v0_10_0_scalar *sigr, const rustsecp256k1_v0_10_0_scalar *sigs, const rustsecp256k1_v0_10_0_ge *pubkey, const rustsecp256k1_v0_10_0_scalar *message) { unsigned char c[32]; - rustsecp256k1_v0_9_2_scalar sn, u1, u2; + rustsecp256k1_v0_10_0_scalar sn, u1, u2; #if !defined(EXHAUSTIVE_TEST_ORDER) - rustsecp256k1_v0_9_2_fe xr; + rustsecp256k1_v0_10_0_fe xr; #endif - rustsecp256k1_v0_9_2_gej pubkeyj; - rustsecp256k1_v0_9_2_gej pr; + rustsecp256k1_v0_10_0_gej pubkeyj; + rustsecp256k1_v0_10_0_gej pr; - if (rustsecp256k1_v0_9_2_scalar_is_zero(sigr) || rustsecp256k1_v0_9_2_scalar_is_zero(sigs)) { + if (rustsecp256k1_v0_10_0_scalar_is_zero(sigr) || rustsecp256k1_v0_10_0_scalar_is_zero(sigs)) { return 0; } - rustsecp256k1_v0_9_2_scalar_inverse_var(&sn, sigs); - rustsecp256k1_v0_9_2_scalar_mul(&u1, &sn, message); - rustsecp256k1_v0_9_2_scalar_mul(&u2, &sn, sigr); - rustsecp256k1_v0_9_2_gej_set_ge(&pubkeyj, pubkey); - rustsecp256k1_v0_9_2_ecmult(&pr, &pubkeyj, &u2, &u1); - if (rustsecp256k1_v0_9_2_gej_is_infinity(&pr)) { + rustsecp256k1_v0_10_0_scalar_inverse_var(&sn, sigs); + rustsecp256k1_v0_10_0_scalar_mul(&u1, &sn, message); + rustsecp256k1_v0_10_0_scalar_mul(&u2, &sn, sigr); + rustsecp256k1_v0_10_0_gej_set_ge(&pubkeyj, pubkey); + rustsecp256k1_v0_10_0_ecmult(&pr, &pubkeyj, &u2, &u1); + if (rustsecp256k1_v0_10_0_gej_is_infinity(&pr)) { return 0; } #if defined(EXHAUSTIVE_TEST_ORDER) { - rustsecp256k1_v0_9_2_scalar computed_r; - rustsecp256k1_v0_9_2_ge pr_ge; - rustsecp256k1_v0_9_2_ge_set_gej(&pr_ge, &pr); - rustsecp256k1_v0_9_2_fe_normalize(&pr_ge.x); + rustsecp256k1_v0_10_0_scalar computed_r; + 
rustsecp256k1_v0_10_0_ge pr_ge; + rustsecp256k1_v0_10_0_ge_set_gej(&pr_ge, &pr); + rustsecp256k1_v0_10_0_fe_normalize(&pr_ge.x); - rustsecp256k1_v0_9_2_fe_get_b32(c, &pr_ge.x); - rustsecp256k1_v0_9_2_scalar_set_b32(&computed_r, c, NULL); - return rustsecp256k1_v0_9_2_scalar_eq(sigr, &computed_r); + rustsecp256k1_v0_10_0_fe_get_b32(c, &pr_ge.x); + rustsecp256k1_v0_10_0_scalar_set_b32(&computed_r, c, NULL); + return rustsecp256k1_v0_10_0_scalar_eq(sigr, &computed_r); } #else - rustsecp256k1_v0_9_2_scalar_get_b32(c, sigr); + rustsecp256k1_v0_10_0_scalar_get_b32(c, sigr); /* we can ignore the fe_set_b32_limit return value, because we know the input is in range */ - (void)rustsecp256k1_v0_9_2_fe_set_b32_limit(&xr, c); + (void)rustsecp256k1_v0_10_0_fe_set_b32_limit(&xr, c); /** We now have the recomputed R point in pr, and its claimed x coordinate (modulo n) * in xr. Naively, we would extract the x coordinate from pr (requiring a inversion modulo p), @@ -243,18 +244,18 @@ static int rustsecp256k1_v0_9_2_ecdsa_sig_verify(const rustsecp256k1_v0_9_2_scal * <=> (xr * pr.z^2 mod p == pr.x) || (xr + n < p && (xr + n) * pr.z^2 mod p == pr.x) * * Thus, we can avoid the inversion, but we have to check both cases separately. - * rustsecp256k1_v0_9_2_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test. + * rustsecp256k1_v0_10_0_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test. */ - if (rustsecp256k1_v0_9_2_gej_eq_x_var(&xr, &pr)) { + if (rustsecp256k1_v0_10_0_gej_eq_x_var(&xr, &pr)) { /* xr * pr.z^2 mod p == pr.x, so the signature is valid. */ return 1; } - if (rustsecp256k1_v0_9_2_fe_cmp_var(&xr, &rustsecp256k1_v0_9_2_ecdsa_const_p_minus_order) >= 0) { + if (rustsecp256k1_v0_10_0_fe_cmp_var(&xr, &rustsecp256k1_v0_10_0_ecdsa_const_p_minus_order) >= 0) { /* xr + n >= p, so we can skip testing the second case. */ return 0; } - rustsecp256k1_v0_9_2_fe_add(&xr, &rustsecp256k1_v0_9_2_ecdsa_const_order_as_fe); - if (rustsecp256k1_v0_9_2_gej_eq_x_var(&xr, &pr)) { + rustsecp256k1_v0_10_0_fe_add(&xr, &rustsecp256k1_v0_10_0_ecdsa_const_order_as_fe); + if (rustsecp256k1_v0_10_0_gej_eq_x_var(&xr, &pr)) { /* (xr + n) * pr.z^2 mod p == pr.x, so the signature is valid. 
*/ return 1; } @@ -262,42 +263,42 @@ static int rustsecp256k1_v0_9_2_ecdsa_sig_verify(const rustsecp256k1_v0_9_2_scal #endif } -static int rustsecp256k1_v0_9_2_ecdsa_sig_sign(const rustsecp256k1_v0_9_2_ecmult_gen_context *ctx, rustsecp256k1_v0_9_2_scalar *sigr, rustsecp256k1_v0_9_2_scalar *sigs, const rustsecp256k1_v0_9_2_scalar *seckey, const rustsecp256k1_v0_9_2_scalar *message, const rustsecp256k1_v0_9_2_scalar *nonce, int *recid) { +static int rustsecp256k1_v0_10_0_ecdsa_sig_sign(const rustsecp256k1_v0_10_0_ecmult_gen_context *ctx, rustsecp256k1_v0_10_0_scalar *sigr, rustsecp256k1_v0_10_0_scalar *sigs, const rustsecp256k1_v0_10_0_scalar *seckey, const rustsecp256k1_v0_10_0_scalar *message, const rustsecp256k1_v0_10_0_scalar *nonce, int *recid) { unsigned char b[32]; - rustsecp256k1_v0_9_2_gej rp; - rustsecp256k1_v0_9_2_ge r; - rustsecp256k1_v0_9_2_scalar n; + rustsecp256k1_v0_10_0_gej rp; + rustsecp256k1_v0_10_0_ge r; + rustsecp256k1_v0_10_0_scalar n; int overflow = 0; int high; - rustsecp256k1_v0_9_2_ecmult_gen(ctx, &rp, nonce); - rustsecp256k1_v0_9_2_ge_set_gej(&r, &rp); - rustsecp256k1_v0_9_2_fe_normalize(&r.x); - rustsecp256k1_v0_9_2_fe_normalize(&r.y); - rustsecp256k1_v0_9_2_fe_get_b32(b, &r.x); - rustsecp256k1_v0_9_2_scalar_set_b32(sigr, b, &overflow); + rustsecp256k1_v0_10_0_ecmult_gen(ctx, &rp, nonce); + rustsecp256k1_v0_10_0_ge_set_gej(&r, &rp); + rustsecp256k1_v0_10_0_fe_normalize(&r.x); + rustsecp256k1_v0_10_0_fe_normalize(&r.y); + rustsecp256k1_v0_10_0_fe_get_b32(b, &r.x); + rustsecp256k1_v0_10_0_scalar_set_b32(sigr, b, &overflow); if (recid) { /* The overflow condition is cryptographically unreachable as hitting it requires finding the discrete log * of some P where P.x >= order, and only 1 in about 2^127 points meet this criteria. */ - *recid = (overflow << 1) | rustsecp256k1_v0_9_2_fe_is_odd(&r.y); - } - rustsecp256k1_v0_9_2_scalar_mul(&n, sigr, seckey); - rustsecp256k1_v0_9_2_scalar_add(&n, &n, message); - rustsecp256k1_v0_9_2_scalar_inverse(sigs, nonce); - rustsecp256k1_v0_9_2_scalar_mul(sigs, sigs, &n); - rustsecp256k1_v0_9_2_scalar_clear(&n); - rustsecp256k1_v0_9_2_gej_clear(&rp); - rustsecp256k1_v0_9_2_ge_clear(&r); - high = rustsecp256k1_v0_9_2_scalar_is_high(sigs); - rustsecp256k1_v0_9_2_scalar_cond_negate(sigs, high); + *recid = (overflow << 1) | rustsecp256k1_v0_10_0_fe_is_odd(&r.y); + } + rustsecp256k1_v0_10_0_scalar_mul(&n, sigr, seckey); + rustsecp256k1_v0_10_0_scalar_add(&n, &n, message); + rustsecp256k1_v0_10_0_scalar_inverse(sigs, nonce); + rustsecp256k1_v0_10_0_scalar_mul(sigs, sigs, &n); + rustsecp256k1_v0_10_0_scalar_clear(&n); + rustsecp256k1_v0_10_0_gej_clear(&rp); + rustsecp256k1_v0_10_0_ge_clear(&r); + high = rustsecp256k1_v0_10_0_scalar_is_high(sigs); + rustsecp256k1_v0_10_0_scalar_cond_negate(sigs, high); if (recid) { *recid ^= high; } /* P.x = order is on the curve, so technically sig->r could end up being zero, which would be an invalid signature. * This is cryptographically unreachable as hitting it requires finding the discrete log of P.x = N. 
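 *
 * For reference, the computation above is the textbook ECDSA signing
 * equation, with nonce k, secret key d and message m:
 *
 *   R = k*G,   sigr = R.x mod n,   sigs = k^(-1) * (m + sigr*d) mod n
 *
 * followed by low-s normalization: sigs is negated if it lies in the upper
 * half of the scalar range. Negating s corresponds to negating R.y at
 * verification time, which is why recid's lowest bit is flipped alongside.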
*/ - return (int)(!rustsecp256k1_v0_9_2_scalar_is_zero(sigr)) & (int)(!rustsecp256k1_v0_9_2_scalar_is_zero(sigs)); + return (int)(!rustsecp256k1_v0_10_0_scalar_is_zero(sigr)) & (int)(!rustsecp256k1_v0_10_0_scalar_is_zero(sigs)); } #endif /* SECP256K1_ECDSA_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/eckey.h b/secp256k1-sys/depend/secp256k1/src/eckey.h index 18b5a8f9c..5ccf8ced1 100644 --- a/secp256k1-sys/depend/secp256k1/src/eckey.h +++ b/secp256k1-sys/depend/secp256k1/src/eckey.h @@ -14,12 +14,12 @@ #include "ecmult.h" #include "ecmult_gen.h" -static int rustsecp256k1_v0_9_2_eckey_pubkey_parse(rustsecp256k1_v0_9_2_ge *elem, const unsigned char *pub, size_t size); -static int rustsecp256k1_v0_9_2_eckey_pubkey_serialize(rustsecp256k1_v0_9_2_ge *elem, unsigned char *pub, size_t *size, int compressed); +static int rustsecp256k1_v0_10_0_eckey_pubkey_parse(rustsecp256k1_v0_10_0_ge *elem, const unsigned char *pub, size_t size); +static int rustsecp256k1_v0_10_0_eckey_pubkey_serialize(rustsecp256k1_v0_10_0_ge *elem, unsigned char *pub, size_t *size, int compressed); -static int rustsecp256k1_v0_9_2_eckey_privkey_tweak_add(rustsecp256k1_v0_9_2_scalar *key, const rustsecp256k1_v0_9_2_scalar *tweak); -static int rustsecp256k1_v0_9_2_eckey_pubkey_tweak_add(rustsecp256k1_v0_9_2_ge *key, const rustsecp256k1_v0_9_2_scalar *tweak); -static int rustsecp256k1_v0_9_2_eckey_privkey_tweak_mul(rustsecp256k1_v0_9_2_scalar *key, const rustsecp256k1_v0_9_2_scalar *tweak); -static int rustsecp256k1_v0_9_2_eckey_pubkey_tweak_mul(rustsecp256k1_v0_9_2_ge *key, const rustsecp256k1_v0_9_2_scalar *tweak); +static int rustsecp256k1_v0_10_0_eckey_privkey_tweak_add(rustsecp256k1_v0_10_0_scalar *key, const rustsecp256k1_v0_10_0_scalar *tweak); +static int rustsecp256k1_v0_10_0_eckey_pubkey_tweak_add(rustsecp256k1_v0_10_0_ge *key, const rustsecp256k1_v0_10_0_scalar *tweak); +static int rustsecp256k1_v0_10_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_10_0_scalar *key, const rustsecp256k1_v0_10_0_scalar *tweak); +static int rustsecp256k1_v0_10_0_eckey_pubkey_tweak_mul(rustsecp256k1_v0_10_0_ge *key, const rustsecp256k1_v0_10_0_scalar *tweak); #endif /* SECP256K1_ECKEY_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/eckey_impl.h b/secp256k1-sys/depend/secp256k1/src/eckey_impl.h index 03e09769e..5e90ead47 100644 --- a/secp256k1-sys/depend/secp256k1/src/eckey_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/eckey_impl.h @@ -14,78 +14,78 @@ #include "group.h" #include "ecmult_gen.h" -static int rustsecp256k1_v0_9_2_eckey_pubkey_parse(rustsecp256k1_v0_9_2_ge *elem, const unsigned char *pub, size_t size) { +static int rustsecp256k1_v0_10_0_eckey_pubkey_parse(rustsecp256k1_v0_10_0_ge *elem, const unsigned char *pub, size_t size) { if (size == 33 && (pub[0] == SECP256K1_TAG_PUBKEY_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_ODD)) { - rustsecp256k1_v0_9_2_fe x; - return rustsecp256k1_v0_9_2_fe_set_b32_limit(&x, pub+1) && rustsecp256k1_v0_9_2_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD); + rustsecp256k1_v0_10_0_fe x; + return rustsecp256k1_v0_10_0_fe_set_b32_limit(&x, pub+1) && rustsecp256k1_v0_10_0_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD); } else if (size == 65 && (pub[0] == SECP256K1_TAG_PUBKEY_UNCOMPRESSED || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) { - rustsecp256k1_v0_9_2_fe x, y; - if (!rustsecp256k1_v0_9_2_fe_set_b32_limit(&x, pub+1) || !rustsecp256k1_v0_9_2_fe_set_b32_limit(&y, pub+33)) { + rustsecp256k1_v0_10_0_fe x, y; + if 
(!rustsecp256k1_v0_10_0_fe_set_b32_limit(&x, pub+1) || !rustsecp256k1_v0_10_0_fe_set_b32_limit(&y, pub+33)) { return 0; } - rustsecp256k1_v0_9_2_ge_set_xy(elem, &x, &y); + rustsecp256k1_v0_10_0_ge_set_xy(elem, &x, &y); if ((pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD) && - rustsecp256k1_v0_9_2_fe_is_odd(&y) != (pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) { + rustsecp256k1_v0_10_0_fe_is_odd(&y) != (pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) { return 0; } - return rustsecp256k1_v0_9_2_ge_is_valid_var(elem); + return rustsecp256k1_v0_10_0_ge_is_valid_var(elem); } else { return 0; } } -static int rustsecp256k1_v0_9_2_eckey_pubkey_serialize(rustsecp256k1_v0_9_2_ge *elem, unsigned char *pub, size_t *size, int compressed) { - if (rustsecp256k1_v0_9_2_ge_is_infinity(elem)) { +static int rustsecp256k1_v0_10_0_eckey_pubkey_serialize(rustsecp256k1_v0_10_0_ge *elem, unsigned char *pub, size_t *size, int compressed) { + if (rustsecp256k1_v0_10_0_ge_is_infinity(elem)) { return 0; } - rustsecp256k1_v0_9_2_fe_normalize_var(&elem->x); - rustsecp256k1_v0_9_2_fe_normalize_var(&elem->y); - rustsecp256k1_v0_9_2_fe_get_b32(&pub[1], &elem->x); + rustsecp256k1_v0_10_0_fe_normalize_var(&elem->x); + rustsecp256k1_v0_10_0_fe_normalize_var(&elem->y); + rustsecp256k1_v0_10_0_fe_get_b32(&pub[1], &elem->x); if (compressed) { *size = 33; - pub[0] = rustsecp256k1_v0_9_2_fe_is_odd(&elem->y) ? SECP256K1_TAG_PUBKEY_ODD : SECP256K1_TAG_PUBKEY_EVEN; + pub[0] = rustsecp256k1_v0_10_0_fe_is_odd(&elem->y) ? SECP256K1_TAG_PUBKEY_ODD : SECP256K1_TAG_PUBKEY_EVEN; } else { *size = 65; pub[0] = SECP256K1_TAG_PUBKEY_UNCOMPRESSED; - rustsecp256k1_v0_9_2_fe_get_b32(&pub[33], &elem->y); + rustsecp256k1_v0_10_0_fe_get_b32(&pub[33], &elem->y); } return 1; } -static int rustsecp256k1_v0_9_2_eckey_privkey_tweak_add(rustsecp256k1_v0_9_2_scalar *key, const rustsecp256k1_v0_9_2_scalar *tweak) { - rustsecp256k1_v0_9_2_scalar_add(key, key, tweak); - return !rustsecp256k1_v0_9_2_scalar_is_zero(key); +static int rustsecp256k1_v0_10_0_eckey_privkey_tweak_add(rustsecp256k1_v0_10_0_scalar *key, const rustsecp256k1_v0_10_0_scalar *tweak) { + rustsecp256k1_v0_10_0_scalar_add(key, key, tweak); + return !rustsecp256k1_v0_10_0_scalar_is_zero(key); } -static int rustsecp256k1_v0_9_2_eckey_pubkey_tweak_add(rustsecp256k1_v0_9_2_ge *key, const rustsecp256k1_v0_9_2_scalar *tweak) { - rustsecp256k1_v0_9_2_gej pt; - rustsecp256k1_v0_9_2_gej_set_ge(&pt, key); - rustsecp256k1_v0_9_2_ecmult(&pt, &pt, &rustsecp256k1_v0_9_2_scalar_one, tweak); +static int rustsecp256k1_v0_10_0_eckey_pubkey_tweak_add(rustsecp256k1_v0_10_0_ge *key, const rustsecp256k1_v0_10_0_scalar *tweak) { + rustsecp256k1_v0_10_0_gej pt; + rustsecp256k1_v0_10_0_gej_set_ge(&pt, key); + rustsecp256k1_v0_10_0_ecmult(&pt, &pt, &rustsecp256k1_v0_10_0_scalar_one, tweak); - if (rustsecp256k1_v0_9_2_gej_is_infinity(&pt)) { + if (rustsecp256k1_v0_10_0_gej_is_infinity(&pt)) { return 0; } - rustsecp256k1_v0_9_2_ge_set_gej(key, &pt); + rustsecp256k1_v0_10_0_ge_set_gej(key, &pt); return 1; } -static int rustsecp256k1_v0_9_2_eckey_privkey_tweak_mul(rustsecp256k1_v0_9_2_scalar *key, const rustsecp256k1_v0_9_2_scalar *tweak) { +static int rustsecp256k1_v0_10_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_10_0_scalar *key, const rustsecp256k1_v0_10_0_scalar *tweak) { int ret; - ret = !rustsecp256k1_v0_9_2_scalar_is_zero(tweak); + ret = !rustsecp256k1_v0_10_0_scalar_is_zero(tweak); - rustsecp256k1_v0_9_2_scalar_mul(key, key, tweak); + rustsecp256k1_v0_10_0_scalar_mul(key, key, 
tweak); return ret; } -static int rustsecp256k1_v0_9_2_eckey_pubkey_tweak_mul(rustsecp256k1_v0_9_2_ge *key, const rustsecp256k1_v0_9_2_scalar *tweak) { - rustsecp256k1_v0_9_2_gej pt; - if (rustsecp256k1_v0_9_2_scalar_is_zero(tweak)) { +static int rustsecp256k1_v0_10_0_eckey_pubkey_tweak_mul(rustsecp256k1_v0_10_0_ge *key, const rustsecp256k1_v0_10_0_scalar *tweak) { + rustsecp256k1_v0_10_0_gej pt; + if (rustsecp256k1_v0_10_0_scalar_is_zero(tweak)) { return 0; } - rustsecp256k1_v0_9_2_gej_set_ge(&pt, key); - rustsecp256k1_v0_9_2_ecmult(&pt, &pt, tweak, &rustsecp256k1_v0_9_2_scalar_zero); - rustsecp256k1_v0_9_2_ge_set_gej(key, &pt); + rustsecp256k1_v0_10_0_gej_set_ge(&pt, key); + rustsecp256k1_v0_10_0_ecmult(&pt, &pt, tweak, &rustsecp256k1_v0_10_0_scalar_zero); + rustsecp256k1_v0_10_0_ge_set_gej(key, &pt); return 1; } diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult.h b/secp256k1-sys/depend/secp256k1/src/ecmult.h index fc37f52ac..3a2b1d5a2 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult.h @@ -41,9 +41,9 @@ #define ECMULT_TABLE_SIZE(w) (1L << ((w)-2)) /** Double multiply: R = na*A + ng*G */ -static void rustsecp256k1_v0_9_2_ecmult(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_scalar *na, const rustsecp256k1_v0_9_2_scalar *ng); +static void rustsecp256k1_v0_10_0_ecmult(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_scalar *na, const rustsecp256k1_v0_10_0_scalar *ng); -typedef int (rustsecp256k1_v0_9_2_ecmult_multi_callback)(rustsecp256k1_v0_9_2_scalar *sc, rustsecp256k1_v0_9_2_ge *pt, size_t idx, void *data); +typedef int (rustsecp256k1_v0_10_0_ecmult_multi_callback)(rustsecp256k1_v0_10_0_scalar *sc, rustsecp256k1_v0_10_0_ge *pt, size_t idx, void *data); /** * Multi-multiply: R = inp_g_sc * G + sum_i ni * Ai. @@ -56,6 +56,6 @@ typedef int (rustsecp256k1_v0_9_2_ecmult_multi_callback)(rustsecp256k1_v0_9_2_sc * 0 if there is not enough scratch space for a single point or * callback returns 0 */ -static int rustsecp256k1_v0_9_2_ecmult_multi_var(const rustsecp256k1_v0_9_2_callback* error_callback, rustsecp256k1_v0_9_2_scratch *scratch, rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_scalar *inp_g_sc, rustsecp256k1_v0_9_2_ecmult_multi_callback cb, void *cbdata, size_t n); +static int rustsecp256k1_v0_10_0_ecmult_multi_var(const rustsecp256k1_v0_10_0_callback* error_callback, rustsecp256k1_v0_10_0_scratch *scratch, rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_scalar *inp_g_sc, rustsecp256k1_v0_10_0_ecmult_multi_callback cb, void *cbdata, size_t n); #endif /* SECP256K1_ECMULT_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table.h b/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table.h index d552a5cee..223cbaad7 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table.h @@ -8,9 +8,9 @@ #define SECP256K1_ECMULT_COMPUTE_TABLE_H /* Construct table of all odd multiples of gen in range 1..(2**(window_g-1)-1). */ -static void rustsecp256k1_v0_9_2_ecmult_compute_table(rustsecp256k1_v0_9_2_ge_storage* table, int window_g, const rustsecp256k1_v0_9_2_gej* gen); +static void rustsecp256k1_v0_10_0_ecmult_compute_table(rustsecp256k1_v0_10_0_ge_storage* table, int window_g, const rustsecp256k1_v0_10_0_gej* gen); -/* Like rustsecp256k1_v0_9_2_ecmult_compute_table, but one for both gen and gen*2^128. 
*/ -static void rustsecp256k1_v0_9_2_ecmult_compute_two_tables(rustsecp256k1_v0_9_2_ge_storage* table, rustsecp256k1_v0_9_2_ge_storage* table_128, int window_g, const rustsecp256k1_v0_9_2_ge* gen); +/* Like rustsecp256k1_v0_10_0_ecmult_compute_table, but one for both gen and gen*2^128. */ +static void rustsecp256k1_v0_10_0_ecmult_compute_two_tables(rustsecp256k1_v0_10_0_ge_storage* table, rustsecp256k1_v0_10_0_ge_storage* table_128, int window_g, const rustsecp256k1_v0_10_0_ge* gen); #endif /* SECP256K1_ECMULT_COMPUTE_TABLE_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table_impl.h index fd7d1bf87..a83af1ea9 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table_impl.h @@ -13,37 +13,37 @@ #include "ecmult.h" #include "util.h" -static void rustsecp256k1_v0_9_2_ecmult_compute_table(rustsecp256k1_v0_9_2_ge_storage* table, int window_g, const rustsecp256k1_v0_9_2_gej* gen) { - rustsecp256k1_v0_9_2_gej gj; - rustsecp256k1_v0_9_2_ge ge, dgen; +static void rustsecp256k1_v0_10_0_ecmult_compute_table(rustsecp256k1_v0_10_0_ge_storage* table, int window_g, const rustsecp256k1_v0_10_0_gej* gen) { + rustsecp256k1_v0_10_0_gej gj; + rustsecp256k1_v0_10_0_ge ge, dgen; int j; gj = *gen; - rustsecp256k1_v0_9_2_ge_set_gej_var(&ge, &gj); - rustsecp256k1_v0_9_2_ge_to_storage(&table[0], &ge); + rustsecp256k1_v0_10_0_ge_set_gej_var(&ge, &gj); + rustsecp256k1_v0_10_0_ge_to_storage(&table[0], &ge); - rustsecp256k1_v0_9_2_gej_double_var(&gj, gen, NULL); - rustsecp256k1_v0_9_2_ge_set_gej_var(&dgen, &gj); + rustsecp256k1_v0_10_0_gej_double_var(&gj, gen, NULL); + rustsecp256k1_v0_10_0_ge_set_gej_var(&dgen, &gj); for (j = 1; j < ECMULT_TABLE_SIZE(window_g); ++j) { - rustsecp256k1_v0_9_2_gej_set_ge(&gj, &ge); - rustsecp256k1_v0_9_2_gej_add_ge_var(&gj, &gj, &dgen, NULL); - rustsecp256k1_v0_9_2_ge_set_gej_var(&ge, &gj); - rustsecp256k1_v0_9_2_ge_to_storage(&table[j], &ge); + rustsecp256k1_v0_10_0_gej_set_ge(&gj, &ge); + rustsecp256k1_v0_10_0_gej_add_ge_var(&gj, &gj, &dgen, NULL); + rustsecp256k1_v0_10_0_ge_set_gej_var(&ge, &gj); + rustsecp256k1_v0_10_0_ge_to_storage(&table[j], &ge); } } -/* Like rustsecp256k1_v0_9_2_ecmult_compute_table, but one for both gen and gen*2^128. */ -static void rustsecp256k1_v0_9_2_ecmult_compute_two_tables(rustsecp256k1_v0_9_2_ge_storage* table, rustsecp256k1_v0_9_2_ge_storage* table_128, int window_g, const rustsecp256k1_v0_9_2_ge* gen) { - rustsecp256k1_v0_9_2_gej gj; +/* Like rustsecp256k1_v0_10_0_ecmult_compute_table, but one for both gen and gen*2^128. 
*/ +static void rustsecp256k1_v0_10_0_ecmult_compute_two_tables(rustsecp256k1_v0_10_0_ge_storage* table, rustsecp256k1_v0_10_0_ge_storage* table_128, int window_g, const rustsecp256k1_v0_10_0_ge* gen) { + rustsecp256k1_v0_10_0_gej gj; int i; - rustsecp256k1_v0_9_2_gej_set_ge(&gj, gen); - rustsecp256k1_v0_9_2_ecmult_compute_table(table, window_g, &gj); + rustsecp256k1_v0_10_0_gej_set_ge(&gj, gen); + rustsecp256k1_v0_10_0_ecmult_compute_table(table, window_g, &gj); for (i = 0; i < 128; ++i) { - rustsecp256k1_v0_9_2_gej_double_var(&gj, &gj, NULL); + rustsecp256k1_v0_10_0_gej_double_var(&gj, &gj, NULL); } - rustsecp256k1_v0_9_2_ecmult_compute_table(table_128, window_g, &gj); + rustsecp256k1_v0_10_0_ecmult_compute_table(table_128, window_g, &gj); } #endif /* SECP256K1_ECMULT_COMPUTE_TABLE_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_const.h b/secp256k1-sys/depend/secp256k1/src/ecmult_const.h index d5e689eb6..06dc9bfa5 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_const.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_const.h @@ -13,10 +13,10 @@ /** * Multiply: R = q*A (in constant-time for q) */ -static void rustsecp256k1_v0_9_2_ecmult_const(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_ge *a, const rustsecp256k1_v0_9_2_scalar *q); +static void rustsecp256k1_v0_10_0_ecmult_const(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_ge *a, const rustsecp256k1_v0_10_0_scalar *q); /** - * Same as rustsecp256k1_v0_9_2_ecmult_const, but takes in an x coordinate of the base point + * Same as rustsecp256k1_v0_10_0_ecmult_const, but takes in an x coordinate of the base point * only, specified as fraction n/d (numerator/denominator). Only the x coordinate of the result is * returned. * @@ -27,11 +27,11 @@ static void rustsecp256k1_v0_9_2_ecmult_const(rustsecp256k1_v0_9_2_gej *r, const * * Constant time in the value of q, but not any other inputs. 
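 *
 * Illustrative call shape (a sketch, not part of the vendored sources): with
 * num/den holding the input x-coordinate fraction and q a nonzero scalar,
 *
 *   rustsecp256k1_v0_10_0_fe rx;
 *   if (!rustsecp256k1_v0_10_0_ecmult_const_xonly(&rx, &num, &den, &q, 0)) {
 *       num/den is not the x-coordinate of a point on the curve
 *   }
 *
 * where, per the implementation below, passing d = NULL is interpreted as a
 * denominator of 1.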
*/ -static int rustsecp256k1_v0_9_2_ecmult_const_xonly( - rustsecp256k1_v0_9_2_fe *r, - const rustsecp256k1_v0_9_2_fe *n, - const rustsecp256k1_v0_9_2_fe *d, - const rustsecp256k1_v0_9_2_scalar *q, +static int rustsecp256k1_v0_10_0_ecmult_const_xonly( + rustsecp256k1_v0_10_0_fe *r, + const rustsecp256k1_v0_10_0_fe *n, + const rustsecp256k1_v0_10_0_fe *d, + const rustsecp256k1_v0_10_0_scalar *q, int known_on_curve ); diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h index b1b4fcb06..5a9b3fd92 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h @@ -1,5 +1,5 @@ /*********************************************************************** - * Copyright (c) 2015 Pieter Wuille, Andrew Poelstra * + * Copyright (c) 2015, 2022 Pieter Wuille, Andrew Poelstra * * Distributed under the MIT software license, see the accompanying * * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ @@ -12,211 +12,262 @@ #include "ecmult_const.h" #include "ecmult_impl.h" +#if defined(EXHAUSTIVE_TEST_ORDER) +/* We need 2^ECMULT_CONST_GROUP_SIZE - 1 to be less than EXHAUSTIVE_TEST_ORDER, because + * the tables cannot have infinities in them (this breaks the effective-affine technique's + * z-ratio tracking) */ +# if EXHAUSTIVE_TEST_ORDER == 199 +# define ECMULT_CONST_GROUP_SIZE 4 +# elif EXHAUSTIVE_TEST_ORDER == 13 +# define ECMULT_CONST_GROUP_SIZE 3 +# elif EXHAUSTIVE_TEST_ORDER == 7 +# define ECMULT_CONST_GROUP_SIZE 2 +# else +# error "Unknown EXHAUSTIVE_TEST_ORDER" +# endif +#else +/* Group size 4 or 5 appears optimal. */ +# define ECMULT_CONST_GROUP_SIZE 5 +#endif + +#define ECMULT_CONST_TABLE_SIZE (1L << (ECMULT_CONST_GROUP_SIZE - 1)) +#define ECMULT_CONST_GROUPS ((129 + ECMULT_CONST_GROUP_SIZE - 1) / ECMULT_CONST_GROUP_SIZE) +#define ECMULT_CONST_BITS (ECMULT_CONST_GROUPS * ECMULT_CONST_GROUP_SIZE) + /** Fill a table 'pre' with precomputed odd multiples of a. * * The resulting point set is brought to a single constant Z denominator, stores the X and Y - * coordinates as ge_storage points in pre, and stores the global Z in globalz. - * It only operates on tables sized for WINDOW_A wnaf multiples. + * coordinates as ge points in pre, and stores the global Z in globalz. + * + * 'pre' must be an array of size ECMULT_CONST_TABLE_SIZE. */ -static void rustsecp256k1_v0_9_2_ecmult_odd_multiples_table_globalz_windowa(rustsecp256k1_v0_9_2_ge *pre, rustsecp256k1_v0_9_2_fe *globalz, const rustsecp256k1_v0_9_2_gej *a) { - rustsecp256k1_v0_9_2_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; +static void rustsecp256k1_v0_10_0_ecmult_const_odd_multiples_table_globalz(rustsecp256k1_v0_10_0_ge *pre, rustsecp256k1_v0_10_0_fe *globalz, const rustsecp256k1_v0_10_0_gej *a) { + rustsecp256k1_v0_10_0_fe zr[ECMULT_CONST_TABLE_SIZE]; - rustsecp256k1_v0_9_2_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), pre, zr, globalz, a); - rustsecp256k1_v0_9_2_ge_table_set_globalz(ECMULT_TABLE_SIZE(WINDOW_A), pre, zr); + rustsecp256k1_v0_10_0_ecmult_odd_multiples_table(ECMULT_CONST_TABLE_SIZE, pre, zr, globalz, a); + rustsecp256k1_v0_10_0_ge_table_set_globalz(ECMULT_CONST_TABLE_SIZE, pre, zr); } -/* This is like `ECMULT_TABLE_GET_GE` but is constant time */ -#define ECMULT_CONST_TABLE_GET_GE(r,pre,n,w) do { \ - int m = 0; \ - /* Extract the sign-bit for a constant time absolute-value. 
*/ \ - int volatile mask = (n) >> (sizeof(n) * CHAR_BIT - 1); \ - int abs_n = ((n) + mask) ^ mask; \ - int idx_n = abs_n >> 1; \ - rustsecp256k1_v0_9_2_fe neg_y; \ - VERIFY_CHECK(((n) & 1) == 1); \ - VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ - VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ - VERIFY_SETUP(rustsecp256k1_v0_9_2_fe_clear(&(r)->x)); \ - VERIFY_SETUP(rustsecp256k1_v0_9_2_fe_clear(&(r)->y)); \ - /* Unconditionally set r->x = (pre)[m].x. r->y = (pre)[m].y. because it's either the correct one \ +/* Given a table 'pre' with odd multiples of a point, put in r the signed-bit multiplication of n with that point. + * + * For example, if ECMULT_CONST_GROUP_SIZE is 4, then pre is expected to contain 8 entries: + * [1*P, 3*P, 5*P, 7*P, 9*P, 11*P, 13*P, 15*P]. n is then expected to be a 4-bit integer (range 0-15), and its + * bits are interpreted as signs of powers of two to look up. + * + * For example, if n=4, which is 0100 in binary, which is interpreted as [- + - -], so the looked up value is + * [ -(2^3) + (2^2) - (2^1) - (2^0) ]*P = -7*P. Every valid n translates to an odd number in range [-15,15], + * which means we just need to look up one of the precomputed values, and optionally negate it. + */ +#define ECMULT_CONST_TABLE_GET_GE(r,pre,n) do { \ + unsigned int m = 0; \ + /* If the top bit of n is 0, we want the negation. */ \ + volatile unsigned int negative = ((n) >> (ECMULT_CONST_GROUP_SIZE - 1)) ^ 1; \ + /* Let n[i] be the i-th bit of n, then the index is + * sum(cnot(n[i]) * 2^i, i=0..l-2) + * where cnot(b) = b if n[l-1] = 1 and 1 - b otherwise. + * For example, if n = 4, in binary 0100, the index is 3, in binary 011. + * + * Proof: + * Let + * x = sum((2*n[i] - 1)*2^i, i=0..l-1) + * = 2*sum(n[i] * 2^i, i=0..l-1) - 2^l + 1 + * be the value represented by n. + * The index is (x - 1)/2 if x > 0 and -(x + 1)/2 otherwise. + * Case x > 0: + * n[l-1] = 1 + * index = sum(n[i] * 2^i, i=0..l-1) - 2^(l-1) + * = sum(n[i] * 2^i, i=0..l-2) + * Case x <= 0: + * n[l-1] = 0 + * index = -(2*sum(n[i] * 2^i, i=0..l-1) - 2^l + 2)/2 + * = 2^(l-1) - 1 - sum(n[i] * 2^i, i=0..l-1) + * = sum((1 - n[i]) * 2^i, i=0..l-2) + */ \ + unsigned int index = ((unsigned int)(-negative) ^ n) & ((1U << (ECMULT_CONST_GROUP_SIZE - 1)) - 1U); \ + rustsecp256k1_v0_10_0_fe neg_y; \ + VERIFY_CHECK((n) < (1U << ECMULT_CONST_GROUP_SIZE)); \ + VERIFY_CHECK(index < (1U << (ECMULT_CONST_GROUP_SIZE - 1))); \ + /* Unconditionally set r->x = (pre)[m].x. r->y = (pre)[m].y. because it's either the correct one * or will get replaced in the later iterations, this is needed to make sure `r` is initialized. */ \ (r)->x = (pre)[m].x; \ (r)->y = (pre)[m].y; \ - for (m = 1; m < ECMULT_TABLE_SIZE(w); m++) { \ + for (m = 1; m < ECMULT_CONST_TABLE_SIZE; m++) { \ /* This loop is used to avoid secret data in array indices. See * the comment in ecmult_gen_impl.h for rationale. */ \ - rustsecp256k1_v0_9_2_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \ - rustsecp256k1_v0_9_2_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \ + rustsecp256k1_v0_10_0_fe_cmov(&(r)->x, &(pre)[m].x, m == index); \ + rustsecp256k1_v0_10_0_fe_cmov(&(r)->y, &(pre)[m].y, m == index); \ } \ (r)->infinity = 0; \ - rustsecp256k1_v0_9_2_fe_negate(&neg_y, &(r)->y, 1); \ - rustsecp256k1_v0_9_2_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \ + rustsecp256k1_v0_10_0_fe_negate(&neg_y, &(r)->y, 1); \ + rustsecp256k1_v0_10_0_fe_cmov(&(r)->y, &neg_y, negative); \ } while(0) -/** Convert a number to WNAF notation. 
- * The number becomes represented by sum(2^{wi} * wnaf[i], i=0..WNAF_SIZE(w)+1) - return_val. - * It has the following guarantees: - * - each wnaf[i] an odd integer between -(1 << w) and (1 << w) - * - each wnaf[i] is nonzero - * - the number of words set is always WNAF_SIZE(w) + 1 - * - * Adapted from `The Width-w NAF Method Provides Small Memory and Fast Elliptic Scalar - * Multiplications Secure against Side Channel Attacks`, Okeya and Tagaki. M. Joye (Ed.) - * CT-RSA 2003, LNCS 2612, pp. 328-443, 2003. Springer-Verlag Berlin Heidelberg 2003 - * - * Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335 - */ -static int rustsecp256k1_v0_9_2_wnaf_const(int *wnaf, const rustsecp256k1_v0_9_2_scalar *scalar, int w, int size) { - int global_sign; - int skew; - int word = 0; - - /* 1 2 3 */ - int u_last; - int u; - - int flip; - rustsecp256k1_v0_9_2_scalar s = *scalar; - - VERIFY_CHECK(w > 0); - VERIFY_CHECK(size > 0); +/* For K as defined in the comment of rustsecp256k1_v0_10_0_ecmult_const, we have several precomputed + * formulas/constants. + * - in exhaustive test mode, we give an explicit expression to compute it at compile time: */ +#ifdef EXHAUSTIVE_TEST_ORDER +static const rustsecp256k1_v0_10_0_scalar rustsecp256k1_v0_10_0_ecmult_const_K = ((SECP256K1_SCALAR_CONST(0, 0, 0, (1U << (ECMULT_CONST_BITS - 128)) - 2U, 0, 0, 0, 0) + EXHAUSTIVE_TEST_ORDER - 1U) * (1U + EXHAUSTIVE_TEST_LAMBDA)) % EXHAUSTIVE_TEST_ORDER; +/* - for the real secp256k1 group we have constants for various ECMULT_CONST_BITS values. */ +#elif ECMULT_CONST_BITS == 129 +/* For GROUP_SIZE = 1,3. */ +static const rustsecp256k1_v0_10_0_scalar rustsecp256k1_v0_10_0_ecmult_const_K = SECP256K1_SCALAR_CONST(0xac9c52b3ul, 0x3fa3cf1ful, 0x5ad9e3fdul, 0x77ed9ba4ul, 0xa880b9fcul, 0x8ec739c2ul, 0xe0cfc810ul, 0xb51283ceul); +#elif ECMULT_CONST_BITS == 130 +/* For GROUP_SIZE = 2,5. */ +static const rustsecp256k1_v0_10_0_scalar rustsecp256k1_v0_10_0_ecmult_const_K = SECP256K1_SCALAR_CONST(0xa4e88a7dul, 0xcb13034eul, 0xc2bdd6bful, 0x7c118d6bul, 0x589ae848ul, 0x26ba29e4ul, 0xb5c2c1dcul, 0xde9798d9ul); +#elif ECMULT_CONST_BITS == 132 +/* For GROUP_SIZE = 4,6 */ +static const rustsecp256k1_v0_10_0_scalar rustsecp256k1_v0_10_0_ecmult_const_K = SECP256K1_SCALAR_CONST(0x76b1d93dul, 0x0fae3c6bul, 0x3215874bul, 0x94e93813ul, 0x7937fe0dul, 0xb66bcaaful, 0xb3749ca5ul, 0xd7b6171bul); +#else +# error "Unknown ECMULT_CONST_BITS" +#endif - /* Note that we cannot handle even numbers by negating them to be odd, as is - * done in other implementations, since if our scalars were specified to have - * width < 256 for performance reasons, their negations would have width 256 - * and we'd lose any performance benefit. Instead, we use a variation of a - * technique from Section 4.2 of the Okeya/Tagaki paper, which is to add 1 to the - * number we are encoding when it is even, returning a skew value indicating - * this, and having the caller compensate after doing the multiplication. +static void rustsecp256k1_v0_10_0_ecmult_const(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_ge *a, const rustsecp256k1_v0_10_0_scalar *q) { + /* The approach below combines the signed-digit logic from Mike Hamburg's + * "Fast and compact elliptic-curve cryptography" (https://eprint.iacr.org/2012/309) + * Section 3.3, with the GLV endomorphism. * - * In fact, we _do_ want to negate numbers to minimize their bit-lengths (and in - * particular, to ensure that the outputs from the endomorphism-split fit into - * 128 bits). 
If we negate, the parity of our number flips, affecting whether - * we want to add to the scalar to ensure that it's odd. */ - flip = rustsecp256k1_v0_9_2_scalar_is_high(&s); - skew = flip ^ rustsecp256k1_v0_9_2_scalar_is_even(&s); - rustsecp256k1_v0_9_2_scalar_cadd_bit(&s, 0, skew); - global_sign = rustsecp256k1_v0_9_2_scalar_cond_negate(&s, flip); - - /* 4 */ - u_last = rustsecp256k1_v0_9_2_scalar_shr_int(&s, w); - do { - int even; - - /* 4.1 4.4 */ - u = rustsecp256k1_v0_9_2_scalar_shr_int(&s, w); - /* 4.2 */ - even = ((u & 1) == 0); - /* In contrast to the original algorithm, u_last is always > 0 and - * therefore we do not need to check its sign. In particular, it's easy - * to see that u_last is never < 0 because u is never < 0. Moreover, - * u_last is never = 0 because u is never even after a loop - * iteration. The same holds analogously for the initial value of - * u_last (in the first loop iteration). */ - VERIFY_CHECK(u_last > 0); - VERIFY_CHECK((u_last & 1) == 1); - u += even; - u_last -= even * (1 << w); - - /* 4.3, adapted for global sign change */ - wnaf[word++] = u_last * global_sign; - - u_last = u; - } while (word * w < size); - wnaf[word] = u * global_sign; - - VERIFY_CHECK(rustsecp256k1_v0_9_2_scalar_is_zero(&s)); - VERIFY_CHECK(word == WNAF_SIZE_BITS(size, w)); - return skew; -} - -static void rustsecp256k1_v0_9_2_ecmult_const(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_ge *a, const rustsecp256k1_v0_9_2_scalar *scalar) { - rustsecp256k1_v0_9_2_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; - rustsecp256k1_v0_9_2_ge tmpa; - rustsecp256k1_v0_9_2_fe Z; - - int skew_1; - rustsecp256k1_v0_9_2_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; - int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)]; - int skew_lam; - rustsecp256k1_v0_9_2_scalar q_1, q_lam; - int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)]; - - int i; + * The idea there is to interpret the bits of a scalar as signs (1 = +, 0 = -), and compute a + * point multiplication in that fashion. Let v be an n-bit non-negative integer (0 <= v < 2^n), + * and v[i] its i'th bit (so v = sum(v[i] * 2^i, i=0..n-1)). Then define: + * + * C_l(v, A) = sum((2*v[i] - 1) * 2^i*A, i=0..l-1) + * + * Then it holds that C_l(v, A) = sum((2*v[i] - 1) * 2^i*A, i=0..l-1) + * = (2*sum(v[i] * 2^i, i=0..l-1) + 1 - 2^l) * A + * = (2*v + 1 - 2^l) * A + * + * Thus, one can compute q*A as C_256((q + 2^256 - 1) / 2, A). This is the basis for the + * paper's signed-digit multi-comb algorithm for multiplication using a precomputed table. + * + * It is appealing to try to combine this with the GLV optimization: the idea that a scalar + * s can be written as s1 + lambda*s2, where lambda is a curve-specific constant such that + * lambda*A is easy to compute, and where s1 and s2 are small. In particular we have the + * rustsecp256k1_v0_10_0_scalar_split_lambda function which performs such a split with the resulting s1 + * and s2 in range (-2^128, 2^128) mod n. This does work, but is uninteresting: + * + * To compute q*A: + * - Let s1, s2 = split_lambda(q) + * - Let R1 = C_256((s1 + 2^256 - 1) / 2, A) + * - Let R2 = C_256((s2 + 2^256 - 1) / 2, lambda*A) + * - Return R1 + R2 + * + * The issue is that while s1 and s2 are small-range numbers, (s1 + 2^256 - 1) / 2 (mod n) + * and (s2 + 2^256 - 1) / 2 (mod n) are not, undoing the benefit of the splitting. + * + * To make it work, we want to modify the input scalar q first, before splitting, and then only + * add a 2^128 offset of the split results (so that they end up in the single 129-bit range + * [0,2^129]). 
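 * Concretely, split_lambda leaves s1 and s2 in (-2^128, 2^128) mod n, so
 * adding the 2^128 offset places each of them in (0, 2^129).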
A slightly smaller offset would work due to the bounds on the split, but we pick + * 2^128 for simplicity. Let s be the scalar fed to split_lambda, and f(q) the function to + * compute it from q: + * + * To compute q*A: + * - Compute s = f(q) + * - Let s1, s2 = split_lambda(s) + * - Let v1 = s1 + 2^128 (mod n) + * - Let v2 = s2 + 2^128 (mod n) + * - Let R1 = C_l(v1, A) + * - Let R2 = C_l(v2, lambda*A) + * - Return R1 + R2 + * + * l will thus need to be at least 129, but we may overshoot by a few bits (see + * further), so keep it as a variable. + * + * To solve for s, we reason: + * q*A = R1 + R2 + * <=> q*A = C_l(s1 + 2^128, A) + C_l(s2 + 2^128, lambda*A) + * <=> q*A = (2*(s1 + 2^128) + 1 - 2^l) * A + (2*(s2 + 2^128) + 1 - 2^l) * lambda*A + * <=> q*A = (2*(s1 + s2*lambda) + (2^129 + 1 - 2^l) * (1 + lambda)) * A + * <=> q = 2*(s1 + s2*lambda) + (2^129 + 1 - 2^l) * (1 + lambda) (mod n) + * <=> q = 2*s + (2^129 + 1 - 2^l) * (1 + lambda) (mod n) + * <=> s = (q + (2^l - 2^129 - 1) * (1 + lambda)) / 2 (mod n) + * <=> f(q) = (q + K) / 2 (mod n) + * where K = (2^l - 2^129 - 1)*(1 + lambda) (mod n) + * + * We will process the computation of C_l(v1, A) and C_l(v2, lambda*A) in groups of + * ECMULT_CONST_GROUP_SIZE, so we set l to the smallest multiple of ECMULT_CONST_GROUP_SIZE + * that is not less than 129; this equals ECMULT_CONST_BITS. + */ - if (rustsecp256k1_v0_9_2_ge_is_infinity(a)) { - rustsecp256k1_v0_9_2_gej_set_infinity(r); + /* The offset to add to s1 and s2 to make them non-negative. Equal to 2^128. */ + static const rustsecp256k1_v0_10_0_scalar S_OFFSET = SECP256K1_SCALAR_CONST(0, 0, 0, 1, 0, 0, 0, 0); + rustsecp256k1_v0_10_0_scalar s, v1, v2; + rustsecp256k1_v0_10_0_ge pre_a[ECMULT_CONST_TABLE_SIZE]; + rustsecp256k1_v0_10_0_ge pre_a_lam[ECMULT_CONST_TABLE_SIZE]; + rustsecp256k1_v0_10_0_fe global_z; + int group, i; + + /* We're allowed to be non-constant time in the point, and the code below (in particular, + * rustsecp256k1_v0_10_0_ecmult_const_odd_multiples_table_globalz) cannot deal with infinity in a + * constant-time manner anyway. */ + if (rustsecp256k1_v0_10_0_ge_is_infinity(a)) { + rustsecp256k1_v0_10_0_gej_set_infinity(r); return; } - /* build wnaf representation for q. */ - /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */ - rustsecp256k1_v0_9_2_scalar_split_lambda(&q_1, &q_lam, scalar); - skew_1 = rustsecp256k1_v0_9_2_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128); - skew_lam = rustsecp256k1_v0_9_2_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128); + /* Compute v1 and v2. */ + rustsecp256k1_v0_10_0_scalar_add(&s, q, &rustsecp256k1_v0_10_0_ecmult_const_K); + rustsecp256k1_v0_10_0_scalar_half(&s, &s); + rustsecp256k1_v0_10_0_scalar_split_lambda(&v1, &v2, &s); + rustsecp256k1_v0_10_0_scalar_add(&v1, &v1, &S_OFFSET); + rustsecp256k1_v0_10_0_scalar_add(&v2, &v2, &S_OFFSET); - /* Calculate odd multiples of a. +#ifdef VERIFY + /* Verify that v1 and v2 are in range [0, 2^129-1]. */ + for (i = 129; i < 256; ++i) { + VERIFY_CHECK(rustsecp256k1_v0_10_0_scalar_get_bits(&v1, i, 1) == 0); + VERIFY_CHECK(rustsecp256k1_v0_10_0_scalar_get_bits(&v2, i, 1) == 0); + } +#endif + + /* Calculate odd multiples of A and A*lambda. * All multiples are brought to the same Z 'denominator', which is stored - * in Z. Due to secp256k1' isomorphism we can do all operations pretending + * in global_z. 
Due to secp256k1' isomorphism we can do all operations pretending * that the Z coordinate was 1, use affine addition formulae, and correct * the Z coordinate of the result once at the end. */ - VERIFY_CHECK(!a->infinity); - rustsecp256k1_v0_9_2_gej_set_ge(r, a); - rustsecp256k1_v0_9_2_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r); - for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - rustsecp256k1_v0_9_2_fe_normalize_weak(&pre_a[i].y); - } - for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - rustsecp256k1_v0_9_2_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]); + rustsecp256k1_v0_10_0_gej_set_ge(r, a); + rustsecp256k1_v0_10_0_ecmult_const_odd_multiples_table_globalz(pre_a, &global_z, r); + for (i = 0; i < ECMULT_CONST_TABLE_SIZE; i++) { + rustsecp256k1_v0_10_0_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]); } - /* first loop iteration (separated out so we can directly set r, rather - * than having it start at infinity, get doubled several times, then have - * its new value added to it) */ - i = wnaf_1[WNAF_SIZE_BITS(128, WINDOW_A - 1)]; - VERIFY_CHECK(i != 0); - ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A); - rustsecp256k1_v0_9_2_gej_set_ge(r, &tmpa); - i = wnaf_lam[WNAF_SIZE_BITS(128, WINDOW_A - 1)]; - VERIFY_CHECK(i != 0); - ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A); - rustsecp256k1_v0_9_2_gej_add_ge(r, r, &tmpa); - /* remaining loop iterations */ - for (i = WNAF_SIZE_BITS(128, WINDOW_A - 1) - 1; i >= 0; i--) { - int n; + /* Next, we compute r = C_l(v1, A) + C_l(v2, lambda*A). + * + * We proceed in groups of ECMULT_CONST_GROUP_SIZE bits, operating on that many bits + * at a time, from high in v1, v2 to low. Call these bits1 (from v1) and bits2 (from v2). + * + * Now note that ECMULT_CONST_TABLE_GET_GE(&t, pre_a, bits1) loads into t a point equal + * to C_{ECMULT_CONST_GROUP_SIZE}(bits1, A), and analogously for pre_lam_a / bits2. + * This means that all we need to do is add these looked up values together, multiplied + * by 2^(ECMULT_GROUP_SIZE * group). + */ + for (group = ECMULT_CONST_GROUPS - 1; group >= 0; --group) { + /* Using the _var get_bits function is ok here, since it's only variable in offset and count, not in the scalar. */ + unsigned int bits1 = rustsecp256k1_v0_10_0_scalar_get_bits_var(&v1, group * ECMULT_CONST_GROUP_SIZE, ECMULT_CONST_GROUP_SIZE); + unsigned int bits2 = rustsecp256k1_v0_10_0_scalar_get_bits_var(&v2, group * ECMULT_CONST_GROUP_SIZE, ECMULT_CONST_GROUP_SIZE); + rustsecp256k1_v0_10_0_ge t; int j; - for (j = 0; j < WINDOW_A - 1; ++j) { - rustsecp256k1_v0_9_2_gej_double(r, r); - } - - n = wnaf_1[i]; - ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); - VERIFY_CHECK(n != 0); - rustsecp256k1_v0_9_2_gej_add_ge(r, r, &tmpa); - n = wnaf_lam[i]; - ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A); - VERIFY_CHECK(n != 0); - rustsecp256k1_v0_9_2_gej_add_ge(r, r, &tmpa); - } - - { - /* Correct for wNAF skew */ - rustsecp256k1_v0_9_2_gej tmpj; - - rustsecp256k1_v0_9_2_ge_neg(&tmpa, &pre_a[0]); - rustsecp256k1_v0_9_2_gej_add_ge(&tmpj, r, &tmpa); - rustsecp256k1_v0_9_2_gej_cmov(r, &tmpj, skew_1); - rustsecp256k1_v0_9_2_ge_neg(&tmpa, &pre_a_lam[0]); - rustsecp256k1_v0_9_2_gej_add_ge(&tmpj, r, &tmpa); - rustsecp256k1_v0_9_2_gej_cmov(r, &tmpj, skew_lam); + ECMULT_CONST_TABLE_GET_GE(&t, pre_a, bits1); + if (group == ECMULT_CONST_GROUPS - 1) { + /* Directly set r in the first iteration. */ + rustsecp256k1_v0_10_0_gej_set_ge(r, &t); + } else { + /* Shift the result so far up. 
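 * Each pass of the doubling loop multiplies the accumulator by 2, so after
 * ECMULT_CONST_GROUP_SIZE doublings the sum collected so far is scaled by
 * 2^ECMULT_CONST_GROUP_SIZE, exactly the weight this group's contribution
 * needs relative to the one above it.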
*/ + for (j = 0; j < ECMULT_CONST_GROUP_SIZE; ++j) { + rustsecp256k1_v0_10_0_gej_double(r, r); + } + rustsecp256k1_v0_10_0_gej_add_ge(r, r, &t); + } + ECMULT_CONST_TABLE_GET_GE(&t, pre_a_lam, bits2); + rustsecp256k1_v0_10_0_gej_add_ge(r, r, &t); } - rustsecp256k1_v0_9_2_fe_mul(&r->z, &r->z, &Z); + /* Map the result back to the secp256k1 curve from the isomorphic curve. */ + rustsecp256k1_v0_10_0_fe_mul(&r->z, &r->z, &global_z); } -static int rustsecp256k1_v0_9_2_ecmult_const_xonly(rustsecp256k1_v0_9_2_fe* r, const rustsecp256k1_v0_9_2_fe *n, const rustsecp256k1_v0_9_2_fe *d, const rustsecp256k1_v0_9_2_scalar *q, int known_on_curve) { +static int rustsecp256k1_v0_10_0_ecmult_const_xonly(rustsecp256k1_v0_10_0_fe* r, const rustsecp256k1_v0_10_0_fe *n, const rustsecp256k1_v0_10_0_fe *d, const rustsecp256k1_v0_10_0_scalar *q, int known_on_curve) { /* This algorithm is a generalization of Peter Dettman's technique for * avoiding the square root in a random-basepoint x-only multiplication @@ -287,23 +338,21 @@ static int rustsecp256k1_v0_9_2_ecmult_const_xonly(rustsecp256k1_v0_9_2_fe* r, c * is needed anywhere in this computation. */ - rustsecp256k1_v0_9_2_fe g, i; - rustsecp256k1_v0_9_2_ge p; - rustsecp256k1_v0_9_2_gej rj; + rustsecp256k1_v0_10_0_fe g, i; + rustsecp256k1_v0_10_0_ge p; + rustsecp256k1_v0_10_0_gej rj; /* Compute g = (n^3 + B*d^3). */ - rustsecp256k1_v0_9_2_fe_sqr(&g, n); - rustsecp256k1_v0_9_2_fe_mul(&g, &g, n); + rustsecp256k1_v0_10_0_fe_sqr(&g, n); + rustsecp256k1_v0_10_0_fe_mul(&g, &g, n); if (d) { - rustsecp256k1_v0_9_2_fe b; -#ifdef VERIFY - VERIFY_CHECK(!rustsecp256k1_v0_9_2_fe_normalizes_to_zero(d)); -#endif - rustsecp256k1_v0_9_2_fe_sqr(&b, d); + rustsecp256k1_v0_10_0_fe b; + VERIFY_CHECK(!rustsecp256k1_v0_10_0_fe_normalizes_to_zero(d)); + rustsecp256k1_v0_10_0_fe_sqr(&b, d); VERIFY_CHECK(SECP256K1_B <= 8); /* magnitude of b will be <= 8 after the next call */ - rustsecp256k1_v0_9_2_fe_mul_int(&b, SECP256K1_B); - rustsecp256k1_v0_9_2_fe_mul(&b, &b, d); - rustsecp256k1_v0_9_2_fe_add(&g, &b); + rustsecp256k1_v0_10_0_fe_mul_int(&b, SECP256K1_B); + rustsecp256k1_v0_10_0_fe_mul(&b, &b, d); + rustsecp256k1_v0_10_0_fe_add(&g, &b); if (!known_on_curve) { /* We need to determine whether (n/d)^3 + 7 is square. * @@ -312,41 +361,37 @@ static int rustsecp256k1_v0_9_2_ecmult_const_xonly(rustsecp256k1_v0_9_2_fe* r, c * <=> is_square((n^3 + 7*d^3) * d) * <=> is_square(g * d) */ - rustsecp256k1_v0_9_2_fe c; - rustsecp256k1_v0_9_2_fe_mul(&c, &g, d); - if (!rustsecp256k1_v0_9_2_fe_is_square_var(&c)) return 0; + rustsecp256k1_v0_10_0_fe c; + rustsecp256k1_v0_10_0_fe_mul(&c, &g, d); + if (!rustsecp256k1_v0_10_0_fe_is_square_var(&c)) return 0; } } else { - rustsecp256k1_v0_9_2_fe_add_int(&g, SECP256K1_B); + rustsecp256k1_v0_10_0_fe_add_int(&g, SECP256K1_B); if (!known_on_curve) { /* g at this point equals x^3 + 7. Test if it is square. */ - if (!rustsecp256k1_v0_9_2_fe_is_square_var(&g)) return 0; + if (!rustsecp256k1_v0_10_0_fe_is_square_var(&g)) return 0; } } /* Compute base point P = (n*g, g^2), the effective affine version of (n*g, g^2, v), which has * corresponding affine X coordinate n/d. */ - rustsecp256k1_v0_9_2_fe_mul(&p.x, &g, n); - rustsecp256k1_v0_9_2_fe_sqr(&p.y, &g); + rustsecp256k1_v0_10_0_fe_mul(&p.x, &g, n); + rustsecp256k1_v0_10_0_fe_sqr(&p.y, &g); p.infinity = 0; /* Perform x-only EC multiplication of P with q. 
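 * q is required to be nonzero and the result cannot be the point at
 * infinity (hence the VERIFY_CHECKs around the call): P is a valid group
 * element of the prime order-n group, so q*P = infinity would force
 * q = 0 mod n.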
*/ -#ifdef VERIFY - VERIFY_CHECK(!rustsecp256k1_v0_9_2_scalar_is_zero(q)); -#endif - rustsecp256k1_v0_9_2_ecmult_const(&rj, &p, q); -#ifdef VERIFY - VERIFY_CHECK(!rustsecp256k1_v0_9_2_gej_is_infinity(&rj)); -#endif + VERIFY_CHECK(!rustsecp256k1_v0_10_0_scalar_is_zero(q)); + rustsecp256k1_v0_10_0_ecmult_const(&rj, &p, q); + VERIFY_CHECK(!rustsecp256k1_v0_10_0_gej_is_infinity(&rj)); /* The resulting (X, Y, Z) point on the effective-affine isomorphic curve corresponds to * (X, Y, Z*v) on the secp256k1 curve. The affine version of that has X coordinate * (X / (Z^2*d*g)). */ - rustsecp256k1_v0_9_2_fe_sqr(&i, &rj.z); - rustsecp256k1_v0_9_2_fe_mul(&i, &i, &g); - if (d) rustsecp256k1_v0_9_2_fe_mul(&i, &i, d); - rustsecp256k1_v0_9_2_fe_inv(&i, &i); - rustsecp256k1_v0_9_2_fe_mul(r, &rj.x, &i); + rustsecp256k1_v0_10_0_fe_sqr(&i, &rj.z); + rustsecp256k1_v0_10_0_fe_mul(&i, &i, &g); + if (d) rustsecp256k1_v0_10_0_fe_mul(&i, &i, d); + rustsecp256k1_v0_10_0_fe_inv(&i, &i); + rustsecp256k1_v0_10_0_fe_mul(r, &rj.x, &i); return 1; } diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h b/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h index 233d812df..ba431a482 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h @@ -33,16 +33,16 @@ typedef struct { int built; /* Blinding values used when computing (n-b)G + bG. */ - rustsecp256k1_v0_9_2_scalar blind; /* -b */ - rustsecp256k1_v0_9_2_gej initial; /* bG */ -} rustsecp256k1_v0_9_2_ecmult_gen_context; + rustsecp256k1_v0_10_0_scalar blind; /* -b */ + rustsecp256k1_v0_10_0_gej initial; /* bG */ +} rustsecp256k1_v0_10_0_ecmult_gen_context; -static void rustsecp256k1_v0_9_2_ecmult_gen_context_build(rustsecp256k1_v0_9_2_ecmult_gen_context* ctx); -static void rustsecp256k1_v0_9_2_ecmult_gen_context_clear(rustsecp256k1_v0_9_2_ecmult_gen_context* ctx); +static void rustsecp256k1_v0_10_0_ecmult_gen_context_build(rustsecp256k1_v0_10_0_ecmult_gen_context* ctx); +static void rustsecp256k1_v0_10_0_ecmult_gen_context_clear(rustsecp256k1_v0_10_0_ecmult_gen_context* ctx); /** Multiply with the generator: R = a*G */ -static void rustsecp256k1_v0_9_2_ecmult_gen(const rustsecp256k1_v0_9_2_ecmult_gen_context* ctx, rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_scalar *a); +static void rustsecp256k1_v0_10_0_ecmult_gen(const rustsecp256k1_v0_10_0_ecmult_gen_context* ctx, rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_scalar *a); -static void rustsecp256k1_v0_9_2_ecmult_gen_blind(rustsecp256k1_v0_9_2_ecmult_gen_context *ctx, const unsigned char *seed32); +static void rustsecp256k1_v0_10_0_ecmult_gen_blind(rustsecp256k1_v0_10_0_ecmult_gen_context *ctx, const unsigned char *seed32); #endif /* SECP256K1_ECMULT_GEN_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table.h b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table.h index 1155b74d3..d608eda76 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table.h @@ -9,6 +9,6 @@ #include "ecmult_gen.h" -static void rustsecp256k1_v0_9_2_ecmult_gen_compute_table(rustsecp256k1_v0_9_2_ge_storage* table, const rustsecp256k1_v0_9_2_ge* gen, int bits); +static void rustsecp256k1_v0_10_0_ecmult_gen_compute_table(rustsecp256k1_v0_10_0_ge_storage* table, const rustsecp256k1_v0_10_0_ge* gen, int bits); #endif /* SECP256K1_ECMULT_GEN_COMPUTE_TABLE_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table_impl.h 
b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table_impl.h index ddae65519..f85c1282d 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table_impl.h @@ -13,69 +13,69 @@ #include "ecmult_gen.h" #include "util.h" -static void rustsecp256k1_v0_9_2_ecmult_gen_compute_table(rustsecp256k1_v0_9_2_ge_storage* table, const rustsecp256k1_v0_9_2_ge* gen, int bits) { +static void rustsecp256k1_v0_10_0_ecmult_gen_compute_table(rustsecp256k1_v0_10_0_ge_storage* table, const rustsecp256k1_v0_10_0_ge* gen, int bits) { int g = ECMULT_GEN_PREC_G(bits); int n = ECMULT_GEN_PREC_N(bits); - rustsecp256k1_v0_9_2_ge* prec = checked_malloc(&default_error_callback, n * g * sizeof(*prec)); - rustsecp256k1_v0_9_2_gej gj; - rustsecp256k1_v0_9_2_gej nums_gej; + rustsecp256k1_v0_10_0_ge* prec = checked_malloc(&default_error_callback, n * g * sizeof(*prec)); + rustsecp256k1_v0_10_0_gej gj; + rustsecp256k1_v0_10_0_gej nums_gej; int i, j; VERIFY_CHECK(g > 0); VERIFY_CHECK(n > 0); /* get the generator */ - rustsecp256k1_v0_9_2_gej_set_ge(&gj, gen); + rustsecp256k1_v0_10_0_gej_set_ge(&gj, gen); /* Construct a group element with no known corresponding scalar (nothing up my sleeve). */ { static const unsigned char nums_b32[33] = "The scalar for this x is unknown"; - rustsecp256k1_v0_9_2_fe nums_x; - rustsecp256k1_v0_9_2_ge nums_ge; + rustsecp256k1_v0_10_0_fe nums_x; + rustsecp256k1_v0_10_0_ge nums_ge; int r; - r = rustsecp256k1_v0_9_2_fe_set_b32_limit(&nums_x, nums_b32); + r = rustsecp256k1_v0_10_0_fe_set_b32_limit(&nums_x, nums_b32); (void)r; VERIFY_CHECK(r); - r = rustsecp256k1_v0_9_2_ge_set_xo_var(&nums_ge, &nums_x, 0); + r = rustsecp256k1_v0_10_0_ge_set_xo_var(&nums_ge, &nums_x, 0); (void)r; VERIFY_CHECK(r); - rustsecp256k1_v0_9_2_gej_set_ge(&nums_gej, &nums_ge); + rustsecp256k1_v0_10_0_gej_set_ge(&nums_gej, &nums_ge); /* Add G to make the bits in x uniformly distributed. */ - rustsecp256k1_v0_9_2_gej_add_ge_var(&nums_gej, &nums_gej, gen, NULL); + rustsecp256k1_v0_10_0_gej_add_ge_var(&nums_gej, &nums_gej, gen, NULL); } /* compute prec. */ { - rustsecp256k1_v0_9_2_gej gbase; - rustsecp256k1_v0_9_2_gej numsbase; - rustsecp256k1_v0_9_2_gej* precj = checked_malloc(&default_error_callback, n * g * sizeof(*precj)); /* Jacobian versions of prec. */ + rustsecp256k1_v0_10_0_gej gbase; + rustsecp256k1_v0_10_0_gej numsbase; + rustsecp256k1_v0_10_0_gej* precj = checked_malloc(&default_error_callback, n * g * sizeof(*precj)); /* Jacobian versions of prec. */ gbase = gj; /* PREC_G^j * G */ numsbase = nums_gej; /* 2^j * nums. */ for (j = 0; j < n; j++) { /* Set precj[j*PREC_G .. j*PREC_G+(PREC_G-1)] to (numsbase, numsbase + gbase, ..., numsbase + (PREC_G-1)*gbase). */ precj[j*g] = numsbase; for (i = 1; i < g; i++) { - rustsecp256k1_v0_9_2_gej_add_var(&precj[j*g + i], &precj[j*g + i - 1], &gbase, NULL); + rustsecp256k1_v0_10_0_gej_add_var(&precj[j*g + i], &precj[j*g + i - 1], &gbase, NULL); } /* Multiply gbase by PREC_G. */ for (i = 0; i < bits; i++) { - rustsecp256k1_v0_9_2_gej_double_var(&gbase, &gbase, NULL); + rustsecp256k1_v0_10_0_gej_double_var(&gbase, &gbase, NULL); } /* Multiply numbase by 2. */ - rustsecp256k1_v0_9_2_gej_double_var(&numsbase, &numsbase, NULL); + rustsecp256k1_v0_10_0_gej_double_var(&numsbase, &numsbase, NULL); if (j == n - 2) { /* In the last iteration, numsbase is (1 - 2^j) * nums instead. 
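 * This choice makes the nums offsets cancel in the final sum: rows
 * 0..n-2 use 2^j * nums and the last row uses (1 - 2^(n-1)) * nums, and
 * (2^0 + ... + 2^(n-2)) + (1 - 2^(n-1)) = (2^(n-1) - 1) + 1 - 2^(n-1) = 0,
 * so adding one precomputed entry from every row reconstructs a*G with no
 * leftover nums term.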
*/ - rustsecp256k1_v0_9_2_gej_neg(&numsbase, &numsbase); - rustsecp256k1_v0_9_2_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL); + rustsecp256k1_v0_10_0_gej_neg(&numsbase, &numsbase); + rustsecp256k1_v0_10_0_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL); } } - rustsecp256k1_v0_9_2_ge_set_all_gej_var(prec, precj, n * g); + rustsecp256k1_v0_10_0_ge_set_all_gej_var(prec, precj, n * g); free(precj); } for (j = 0; j < n; j++) { for (i = 0; i < g; i++) { - rustsecp256k1_v0_9_2_ge_to_storage(&table[j*g + i], &prec[j*g + i]); + rustsecp256k1_v0_10_0_ge_to_storage(&table[j*g + i], &prec[j*g + i]); } } free(prec); diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h index 6d8feb401..cbdafe7a7 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h @@ -14,19 +14,19 @@ #include "hash_impl.h" #include "precomputed_ecmult_gen.h" -static void rustsecp256k1_v0_9_2_ecmult_gen_context_build(rustsecp256k1_v0_9_2_ecmult_gen_context *ctx) { - rustsecp256k1_v0_9_2_ecmult_gen_blind(ctx, NULL); +static void rustsecp256k1_v0_10_0_ecmult_gen_context_build(rustsecp256k1_v0_10_0_ecmult_gen_context *ctx) { + rustsecp256k1_v0_10_0_ecmult_gen_blind(ctx, NULL); ctx->built = 1; } -static int rustsecp256k1_v0_9_2_ecmult_gen_context_is_built(const rustsecp256k1_v0_9_2_ecmult_gen_context* ctx) { +static int rustsecp256k1_v0_10_0_ecmult_gen_context_is_built(const rustsecp256k1_v0_10_0_ecmult_gen_context* ctx) { return ctx->built; } -static void rustsecp256k1_v0_9_2_ecmult_gen_context_clear(rustsecp256k1_v0_9_2_ecmult_gen_context *ctx) { +static void rustsecp256k1_v0_10_0_ecmult_gen_context_clear(rustsecp256k1_v0_10_0_ecmult_gen_context *ctx) { ctx->built = 0; - rustsecp256k1_v0_9_2_scalar_clear(&ctx->blind); - rustsecp256k1_v0_9_2_gej_clear(&ctx->initial); + rustsecp256k1_v0_10_0_scalar_clear(&ctx->blind); + rustsecp256k1_v0_10_0_gej_clear(&ctx->initial); } /* For accelerating the computation of a*G: @@ -40,25 +40,25 @@ static void rustsecp256k1_v0_9_2_ecmult_gen_context_clear(rustsecp256k1_v0_9_2_e * precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0 ... PREC_N-1). * None of the resulting prec group elements have a known scalar, and neither do any of * the intermediate sums while computing a*G. - * The prec values are stored in rustsecp256k1_v0_9_2_ecmult_gen_prec_table[i][n_i] = n_i * (PREC_G)^i * G + U_i. + * The prec values are stored in rustsecp256k1_v0_10_0_ecmult_gen_prec_table[i][n_i] = n_i * (PREC_G)^i * G + U_i. */ -static void rustsecp256k1_v0_9_2_ecmult_gen(const rustsecp256k1_v0_9_2_ecmult_gen_context *ctx, rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_scalar *gn) { +static void rustsecp256k1_v0_10_0_ecmult_gen(const rustsecp256k1_v0_10_0_ecmult_gen_context *ctx, rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_scalar *gn) { int bits = ECMULT_GEN_PREC_BITS; int g = ECMULT_GEN_PREC_G(bits); int n = ECMULT_GEN_PREC_N(bits); - rustsecp256k1_v0_9_2_ge add; - rustsecp256k1_v0_9_2_ge_storage adds; - rustsecp256k1_v0_9_2_scalar gnb; + rustsecp256k1_v0_10_0_ge add; + rustsecp256k1_v0_10_0_ge_storage adds; + rustsecp256k1_v0_10_0_scalar gnb; int i, j, n_i; memset(&adds, 0, sizeof(adds)); *r = ctx->initial; /* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. 
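 * Concretely, ctx->blind holds -b and ctx->initial holds b*G (with a
 * randomized projection), so r starts out as b*G, the addition below forms
 * gnb = n - b, and the table walk accumulates (n - b)*G on top, giving
 * r = b*G + (n - b)*G = n*G while only bits of n - b are ever used as
 * (conditionally-moved) table indices.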
*/ - rustsecp256k1_v0_9_2_scalar_add(&gnb, gn, &ctx->blind); + rustsecp256k1_v0_10_0_scalar_add(&gnb, gn, &ctx->blind); add.infinity = 0; for (i = 0; i < n; i++) { - n_i = rustsecp256k1_v0_9_2_scalar_get_bits(&gnb, i * bits, bits); + n_i = rustsecp256k1_v0_10_0_scalar_get_bits(&gnb, i * bits, bits); for (j = 0; j < g; j++) { /** This uses a conditional move to avoid any secret data in array indexes. * _Any_ use of secret indexes has been demonstrated to result in timing @@ -70,61 +70,61 @@ static void rustsecp256k1_v0_9_2_ecmult_gen(const rustsecp256k1_v0_9_2_ecmult_ge * by Dag Arne Osvik, Adi Shamir, and Eran Tromer * (https://www.tau.ac.il/~tromer/papers/cache.pdf) */ - rustsecp256k1_v0_9_2_ge_storage_cmov(&adds, &rustsecp256k1_v0_9_2_ecmult_gen_prec_table[i][j], j == n_i); + rustsecp256k1_v0_10_0_ge_storage_cmov(&adds, &rustsecp256k1_v0_10_0_ecmult_gen_prec_table[i][j], j == n_i); } - rustsecp256k1_v0_9_2_ge_from_storage(&add, &adds); - rustsecp256k1_v0_9_2_gej_add_ge(r, r, &add); + rustsecp256k1_v0_10_0_ge_from_storage(&add, &adds); + rustsecp256k1_v0_10_0_gej_add_ge(r, r, &add); } n_i = 0; - rustsecp256k1_v0_9_2_ge_clear(&add); - rustsecp256k1_v0_9_2_scalar_clear(&gnb); + rustsecp256k1_v0_10_0_ge_clear(&add); + rustsecp256k1_v0_10_0_scalar_clear(&gnb); } -/* Setup blinding values for rustsecp256k1_v0_9_2_ecmult_gen. */ -static void rustsecp256k1_v0_9_2_ecmult_gen_blind(rustsecp256k1_v0_9_2_ecmult_gen_context *ctx, const unsigned char *seed32) { - rustsecp256k1_v0_9_2_scalar b; - rustsecp256k1_v0_9_2_gej gb; - rustsecp256k1_v0_9_2_fe s; +/* Setup blinding values for rustsecp256k1_v0_10_0_ecmult_gen. */ +static void rustsecp256k1_v0_10_0_ecmult_gen_blind(rustsecp256k1_v0_10_0_ecmult_gen_context *ctx, const unsigned char *seed32) { + rustsecp256k1_v0_10_0_scalar b; + rustsecp256k1_v0_10_0_gej gb; + rustsecp256k1_v0_10_0_fe s; unsigned char nonce32[32]; - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256 rng; unsigned char keydata[64]; if (seed32 == NULL) { /* When seed is NULL, reset the initial point and blinding value. */ - rustsecp256k1_v0_9_2_gej_set_ge(&ctx->initial, &rustsecp256k1_v0_9_2_ge_const_g); - rustsecp256k1_v0_9_2_gej_neg(&ctx->initial, &ctx->initial); - rustsecp256k1_v0_9_2_scalar_set_int(&ctx->blind, 1); + rustsecp256k1_v0_10_0_gej_set_ge(&ctx->initial, &rustsecp256k1_v0_10_0_ge_const_g); + rustsecp256k1_v0_10_0_gej_neg(&ctx->initial, &ctx->initial); + rustsecp256k1_v0_10_0_scalar_set_int(&ctx->blind, 1); return; } /* The prior blinding value (if not reset) is chained forward by including it in the hash. */ - rustsecp256k1_v0_9_2_scalar_get_b32(keydata, &ctx->blind); + rustsecp256k1_v0_10_0_scalar_get_b32(keydata, &ctx->blind); /** Using a CSPRNG allows a failure free interface, avoids needing large amounts of random data, * and guards against weak or adversarial seeds. This is a simpler and safer interface than * asking the caller for blinding values directly and expecting them to retry on failure. 
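 * (The RFC6979 HMAC-SHA256 construction is used here purely as a
 * deterministic PRG keyed by the previous blinding value and the caller's
 * seed; it plays no nonce-generation role in this context.)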
*/ VERIFY_CHECK(seed32 != NULL); memcpy(keydata + 32, seed32, 32); - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_initialize(&rng, keydata, 64); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_initialize(&rng, keydata, 64); memset(keydata, 0, sizeof(keydata)); - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); - rustsecp256k1_v0_9_2_fe_set_b32_mod(&s, nonce32); - rustsecp256k1_v0_9_2_fe_cmov(&s, &rustsecp256k1_v0_9_2_fe_one, rustsecp256k1_v0_9_2_fe_normalizes_to_zero(&s)); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); + rustsecp256k1_v0_10_0_fe_set_b32_mod(&s, nonce32); + rustsecp256k1_v0_10_0_fe_cmov(&s, &rustsecp256k1_v0_10_0_fe_one, rustsecp256k1_v0_10_0_fe_normalizes_to_zero(&s)); /* Randomize the projection to defend against multiplier sidechannels. - Do this before our own call to rustsecp256k1_v0_9_2_ecmult_gen below. */ - rustsecp256k1_v0_9_2_gej_rescale(&ctx->initial, &s); - rustsecp256k1_v0_9_2_fe_clear(&s); - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); - rustsecp256k1_v0_9_2_scalar_set_b32(&b, nonce32, NULL); + Do this before our own call to rustsecp256k1_v0_10_0_ecmult_gen below. */ + rustsecp256k1_v0_10_0_gej_rescale(&ctx->initial, &s); + rustsecp256k1_v0_10_0_fe_clear(&s); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); + rustsecp256k1_v0_10_0_scalar_set_b32(&b, nonce32, NULL); /* A blinding value of 0 works, but would undermine the projection hardening. */ - rustsecp256k1_v0_9_2_scalar_cmov(&b, &rustsecp256k1_v0_9_2_scalar_one, rustsecp256k1_v0_9_2_scalar_is_zero(&b)); - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_10_0_scalar_cmov(&b, &rustsecp256k1_v0_10_0_scalar_one, rustsecp256k1_v0_10_0_scalar_is_zero(&b)); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_finalize(&rng); memset(nonce32, 0, 32); /* The random projection in ctx->initial ensures that gb will have a random projection. */ - rustsecp256k1_v0_9_2_ecmult_gen(ctx, &gb, &b); - rustsecp256k1_v0_9_2_scalar_negate(&b, &b); + rustsecp256k1_v0_10_0_ecmult_gen(ctx, &gb, &b); + rustsecp256k1_v0_10_0_scalar_negate(&b, &b); ctx->blind = b; ctx->initial = gb; - rustsecp256k1_v0_9_2_scalar_clear(&b); - rustsecp256k1_v0_9_2_gej_clear(&gb); + rustsecp256k1_v0_10_0_scalar_clear(&b); + rustsecp256k1_v0_10_0_gej_clear(&gb); } #endif /* SECP256K1_ECMULT_GEN_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h index a33bd09a3..dbd5215ae 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h @@ -33,8 +33,8 @@ /** Larger values for ECMULT_WINDOW_SIZE result in possibly better * performance at the cost of an exponentially larger precomputed * table. The exact table size is - * (1 << (WINDOW_G - 2)) * sizeof(rustsecp256k1_v0_9_2_ge_storage) bytes, - * where sizeof(rustsecp256k1_v0_9_2_ge_storage) is typically 64 bytes but can + * (1 << (WINDOW_G - 2)) * sizeof(rustsecp256k1_v0_10_0_ge_storage) bytes, + * where sizeof(rustsecp256k1_v0_10_0_ge_storage) is typically 64 bytes but can * be larger due to platform-specific padding and alignment. * Two tables of this size are used (due to the endomorphism * optimization). 
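/* Illustrative aside, not part of the vendored patch: a self-contained sketch
 * of the table-size arithmetic described in the comment above, assuming the
 * typical 64-byte rustsecp256k1_v0_10_0_ge_storage. For a window size of 15
 * (the library's usual default) this prints 8192 entries per table and
 * 1048576 bytes (1 MiB) for the two tables combined. */
#include <stdio.h>

int main(void) {
    int window_g = 15;                    /* ECMULT_WINDOW_SIZE */
    long entries = 1L << (window_g - 2);  /* ECMULT_TABLE_SIZE(w) */
    long bytes = 2 * entries * 64;        /* two tables, 64-byte entries */
    printf("%ld entries per table, %ld bytes total\n", entries, bytes);
    return 0;
}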
@@ -70,14 +70,14 @@ * Lastly the zr[0] value, which isn't used above, is set so that: * - a.z = z(pre_a[0]) / zr[0] */ -static void rustsecp256k1_v0_9_2_ecmult_odd_multiples_table(int n, rustsecp256k1_v0_9_2_ge *pre_a, rustsecp256k1_v0_9_2_fe *zr, rustsecp256k1_v0_9_2_fe *z, const rustsecp256k1_v0_9_2_gej *a) { - rustsecp256k1_v0_9_2_gej d, ai; - rustsecp256k1_v0_9_2_ge d_ge; +static void rustsecp256k1_v0_10_0_ecmult_odd_multiples_table(int n, rustsecp256k1_v0_10_0_ge *pre_a, rustsecp256k1_v0_10_0_fe *zr, rustsecp256k1_v0_10_0_fe *z, const rustsecp256k1_v0_10_0_gej *a) { + rustsecp256k1_v0_10_0_gej d, ai; + rustsecp256k1_v0_10_0_ge d_ge; int i; VERIFY_CHECK(!a->infinity); - rustsecp256k1_v0_9_2_gej_double_var(&d, a, NULL); + rustsecp256k1_v0_10_0_gej_double_var(&d, a, NULL); /* * Perform the additions using an isomorphic curve Y^2 = X^3 + 7*C^6 where C := d.z. @@ -90,11 +90,11 @@ static void rustsecp256k1_v0_9_2_ecmult_odd_multiples_table(int n, rustsecp256k1 * * The group addition functions work correctly on these isomorphic curves. * In particular phi(d) is easy to represent in affine coordinates under this isomorphism. - * This lets us use the faster rustsecp256k1_v0_9_2_gej_add_ge_var group addition function that we wouldn't be able to use otherwise. + * This lets us use the faster rustsecp256k1_v0_10_0_gej_add_ge_var group addition function that we wouldn't be able to use otherwise. */ - rustsecp256k1_v0_9_2_ge_set_xy(&d_ge, &d.x, &d.y); - rustsecp256k1_v0_9_2_ge_set_gej_zinv(&pre_a[0], a, &d.z); - rustsecp256k1_v0_9_2_gej_set_ge(&ai, &pre_a[0]); + rustsecp256k1_v0_10_0_ge_set_xy(&d_ge, &d.x, &d.y); + rustsecp256k1_v0_10_0_ge_set_gej_zinv(&pre_a[0], a, &d.z); + rustsecp256k1_v0_10_0_gej_set_ge(&ai, &pre_a[0]); ai.z = a->z; /* pre_a[0] is the point (a.x*C^2, a.y*C^3, a.z*C) which is equivalent to a. @@ -103,18 +103,18 @@ static void rustsecp256k1_v0_9_2_ecmult_odd_multiples_table(int n, rustsecp256k1 zr[0] = d.z; for (i = 1; i < n; i++) { - rustsecp256k1_v0_9_2_gej_add_ge_var(&ai, &ai, &d_ge, &zr[i]); - rustsecp256k1_v0_9_2_ge_set_xy(&pre_a[i], &ai.x, &ai.y); + rustsecp256k1_v0_10_0_gej_add_ge_var(&ai, &ai, &d_ge, &zr[i]); + rustsecp256k1_v0_10_0_ge_set_xy(&pre_a[i], &ai.x, &ai.y); } /* Multiply the last z-coordinate by C to undo the isomorphism. * Since the z-coordinates of the pre_a values are implied by the zr array of z-coordinate ratios, * undoing the isomorphism here undoes the isomorphism for all pre_a values. 
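Stripped of the isomorphism and z-ratio bookkeeping, `ecmult_odd_multiples_table` computes the recurrence (2i+1)P = (2i-1)P + 2P. A sketch of just that recurrence over plain integers (a toy stand-in, not the library's group representation):

```c
#include <assert.h>

/* Fill pre[i] = (2*i + 1) * p for i in [0, n), by repeatedly adding d = 2*p.
 * Integers stand in for group elements; the real code also records the
 * z-ratio of each addition in zr[]. */
static void odd_multiples_table(int n, long *pre, long p) {
    long d = 2 * p;   /* the doubling of the input point */
    int i;
    pre[0] = p;       /* 1*P */
    for (i = 1; i < n; i++) {
        pre[i] = pre[i - 1] + d;   /* (2i+1)P = (2i-1)P + 2P */
    }
}

int main(void) {
    long pre[8];
    int i;
    odd_multiples_table(8, pre, 7);
    for (i = 0; i < 8; i++) {
        assert(pre[i] == (2 * i + 1) * 7);
    }
    return 0;
}
```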
*/ - rustsecp256k1_v0_9_2_fe_mul(z, &ai.z, &d.z); + rustsecp256k1_v0_10_0_fe_mul(z, &ai.z, &d.z); } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_ecmult_table_verify(int n, int w) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_ecmult_table_verify(int n, int w) { (void)n; (void)w; VERIFY_CHECK(((n) & 1) == 1); @@ -122,33 +122,33 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_ecmult_table_verify(int n, int VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_ecmult_table_get_ge(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_ge *pre, int n, int w) { - rustsecp256k1_v0_9_2_ecmult_table_verify(n,w); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_ecmult_table_get_ge(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_ge *pre, int n, int w) { + rustsecp256k1_v0_10_0_ecmult_table_verify(n,w); if (n > 0) { *r = pre[(n-1)/2]; } else { *r = pre[(-n-1)/2]; - rustsecp256k1_v0_9_2_fe_negate(&(r->y), &(r->y), 1); + rustsecp256k1_v0_10_0_fe_negate(&(r->y), &(r->y), 1); } } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_ecmult_table_get_ge_lambda(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_ge *pre, const rustsecp256k1_v0_9_2_fe *x, int n, int w) { - rustsecp256k1_v0_9_2_ecmult_table_verify(n,w); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_ecmult_table_get_ge_lambda(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_ge *pre, const rustsecp256k1_v0_10_0_fe *x, int n, int w) { + rustsecp256k1_v0_10_0_ecmult_table_verify(n,w); if (n > 0) { - rustsecp256k1_v0_9_2_ge_set_xy(r, &x[(n-1)/2], &pre[(n-1)/2].y); + rustsecp256k1_v0_10_0_ge_set_xy(r, &x[(n-1)/2], &pre[(n-1)/2].y); } else { - rustsecp256k1_v0_9_2_ge_set_xy(r, &x[(-n-1)/2], &pre[(-n-1)/2].y); - rustsecp256k1_v0_9_2_fe_negate(&(r->y), &(r->y), 1); + rustsecp256k1_v0_10_0_ge_set_xy(r, &x[(-n-1)/2], &pre[(-n-1)/2].y); + rustsecp256k1_v0_10_0_fe_negate(&(r->y), &(r->y), 1); } } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_ecmult_table_get_ge_storage(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_ge_storage *pre, int n, int w) { - rustsecp256k1_v0_9_2_ecmult_table_verify(n,w); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_ecmult_table_get_ge_storage(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_ge_storage *pre, int n, int w) { + rustsecp256k1_v0_10_0_ecmult_table_verify(n,w); if (n > 0) { - rustsecp256k1_v0_9_2_ge_from_storage(r, &pre[(n-1)/2]); + rustsecp256k1_v0_10_0_ge_from_storage(r, &pre[(n-1)/2]); } else { - rustsecp256k1_v0_9_2_ge_from_storage(r, &pre[(-n-1)/2]); - rustsecp256k1_v0_9_2_fe_negate(&(r->y), &(r->y), 1); + rustsecp256k1_v0_10_0_ge_from_storage(r, &pre[(-n-1)/2]); + rustsecp256k1_v0_10_0_fe_negate(&(r->y), &(r->y), 1); } } @@ -159,8 +159,8 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_ecmult_table_get_ge_storage(ru * - the number of set values in wnaf is returned. This number is at most 256, and at most one more * than the number of bits in the (absolute value) of the input. 
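The three `table_get_ge` helpers above share one indexing convention: the table stores only the positive odd multiples, an odd signed digit n selects entry (n-1)/2, and negative digits reuse the mirrored entry with a cheap negation. Decoded over toy integers:

```c
#include <assert.h>

/* pre[i] holds (2i+1)*P; return n*P for odd n with |n| <= 2*len - 1. */
static long table_get(const long *pre, int n) {
    if (n > 0) {
        return pre[(n - 1) / 2];
    } else {
        return -pre[(-n - 1) / 2];  /* negate the stored positive multiple */
    }
}

int main(void) {
    long pre[4] = {7, 21, 35, 49};      /* P, 3P, 5P, 7P with P = 7 */
    assert(table_get(pre, 5) == 35);    /* 5P */
    assert(table_get(pre, -3) == -21);  /* -3P */
    return 0;
}
```

Storing only positive multiples halves the table; the sign flip costs a single field negation, as in the `fe_negate` calls above.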
*/ -static int rustsecp256k1_v0_9_2_ecmult_wnaf(int *wnaf, int len, const rustsecp256k1_v0_9_2_scalar *a, int w) { - rustsecp256k1_v0_9_2_scalar s; +static int rustsecp256k1_v0_10_0_ecmult_wnaf(int *wnaf, int len, const rustsecp256k1_v0_10_0_scalar *a, int w) { + rustsecp256k1_v0_10_0_scalar s; int last_set_bit = -1; int bit = 0; int sign = 1; @@ -174,15 +174,15 @@ static int rustsecp256k1_v0_9_2_ecmult_wnaf(int *wnaf, int len, const rustsecp25 memset(wnaf, 0, len * sizeof(wnaf[0])); s = *a; - if (rustsecp256k1_v0_9_2_scalar_get_bits(&s, 255, 1)) { - rustsecp256k1_v0_9_2_scalar_negate(&s, &s); + if (rustsecp256k1_v0_10_0_scalar_get_bits(&s, 255, 1)) { + rustsecp256k1_v0_10_0_scalar_negate(&s, &s); sign = -1; } while (bit < len) { int now; int word; - if (rustsecp256k1_v0_9_2_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) { + if (rustsecp256k1_v0_10_0_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) { bit++; continue; } @@ -192,7 +192,7 @@ static int rustsecp256k1_v0_9_2_ecmult_wnaf(int *wnaf, int len, const rustsecp25 now = len - bit; } - word = rustsecp256k1_v0_9_2_scalar_get_bits_var(&s, bit, now) + carry; + word = rustsecp256k1_v0_10_0_scalar_get_bits_var(&s, bit, now) + carry; carry = (word >> (w-1)) & 1; word -= carry << w; @@ -209,7 +209,7 @@ static int rustsecp256k1_v0_9_2_ecmult_wnaf(int *wnaf, int len, const rustsecp25 VERIFY_CHECK(carry == 0); while (verify_bit < 256) { - VERIFY_CHECK(rustsecp256k1_v0_9_2_scalar_get_bits(&s, verify_bit, 1) == 0); + VERIFY_CHECK(rustsecp256k1_v0_10_0_scalar_get_bits(&s, verify_bit, 1) == 0); verify_bit++; } } @@ -217,25 +217,25 @@ static int rustsecp256k1_v0_9_2_ecmult_wnaf(int *wnaf, int len, const rustsecp25 return last_set_bit + 1; } -struct rustsecp256k1_v0_9_2_strauss_point_state { +struct rustsecp256k1_v0_10_0_strauss_point_state { int wnaf_na_1[129]; int wnaf_na_lam[129]; int bits_na_1; int bits_na_lam; }; -struct rustsecp256k1_v0_9_2_strauss_state { +struct rustsecp256k1_v0_10_0_strauss_state { /* aux is used to hold z-ratios, and then used to hold pre_a[i].x * BETA values. */ - rustsecp256k1_v0_9_2_fe* aux; - rustsecp256k1_v0_9_2_ge* pre_a; - struct rustsecp256k1_v0_9_2_strauss_point_state* ps; + rustsecp256k1_v0_10_0_fe* aux; + rustsecp256k1_v0_10_0_ge* pre_a; + struct rustsecp256k1_v0_10_0_strauss_point_state* ps; }; -static void rustsecp256k1_v0_9_2_ecmult_strauss_wnaf(const struct rustsecp256k1_v0_9_2_strauss_state *state, rustsecp256k1_v0_9_2_gej *r, size_t num, const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_scalar *na, const rustsecp256k1_v0_9_2_scalar *ng) { - rustsecp256k1_v0_9_2_ge tmpa; - rustsecp256k1_v0_9_2_fe Z; +static void rustsecp256k1_v0_10_0_ecmult_strauss_wnaf(const struct rustsecp256k1_v0_10_0_strauss_state *state, rustsecp256k1_v0_10_0_gej *r, size_t num, const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_scalar *na, const rustsecp256k1_v0_10_0_scalar *ng) { + rustsecp256k1_v0_10_0_ge tmpa; + rustsecp256k1_v0_10_0_fe Z; /* Split G factors. 
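The `ecmult_wnaf` function above converts a scalar to width-w non-adjacent form: every nonzero digit is odd, and any two nonzero digits are at least w positions apart, so at most one table addition is needed per w doublings. A textbook version of the conversion on a small nonnegative `int64_t`, without the high-scalar negation or carry handling the vendored code adds, plus a reconstruction check (toy sketch):

```c
#include <assert.h>
#include <stdint.h>

/* Width-w NAF of a nonnegative value: every nonzero digit is odd and lies in
 * (-2^(w-1), 2^(w-1)); after a nonzero digit the next w-1 digits are zero. */
static int wnaf_demo(int *digits, int64_t v, int w) {
    int len = 0;
    while (v != 0) {
        int d = 0;
        if (v & 1) {
            d = (int)(v & ((1 << w) - 1));         /* v mod 2^w */
            if (d >= (1 << (w - 1))) d -= 1 << w;  /* center into signed range */
            v -= d;                                /* now divisible by 2^w */
        }
        digits[len++] = d;
        v >>= 1;
    }
    return len;
}

int main(void) {
    int digits[70];
    int64_t v = 0, scale = 1;
    int i, len = wnaf_demo(digits, 123456789, 5);
    for (i = 0; i < len; i++) {   /* reconstruct sum(digits[i] * 2^i) */
        v += digits[i] * scale;
        scale <<= 1;
    }
    assert(v == 123456789);
    return 0;
}
```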
*/ - rustsecp256k1_v0_9_2_scalar ng_1, ng_128; + rustsecp256k1_v0_10_0_scalar ng_1, ng_128; int wnaf_ng_1[129]; int bits_ng_1 = 0; int wnaf_ng_128[129]; @@ -245,19 +245,19 @@ static void rustsecp256k1_v0_9_2_ecmult_strauss_wnaf(const struct rustsecp256k1_ size_t np; size_t no = 0; - rustsecp256k1_v0_9_2_fe_set_int(&Z, 1); + rustsecp256k1_v0_10_0_fe_set_int(&Z, 1); for (np = 0; np < num; ++np) { - rustsecp256k1_v0_9_2_gej tmp; - rustsecp256k1_v0_9_2_scalar na_1, na_lam; - if (rustsecp256k1_v0_9_2_scalar_is_zero(&na[np]) || rustsecp256k1_v0_9_2_gej_is_infinity(&a[np])) { + rustsecp256k1_v0_10_0_gej tmp; + rustsecp256k1_v0_10_0_scalar na_1, na_lam; + if (rustsecp256k1_v0_10_0_scalar_is_zero(&na[np]) || rustsecp256k1_v0_10_0_gej_is_infinity(&a[np])) { continue; } /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */ - rustsecp256k1_v0_9_2_scalar_split_lambda(&na_1, &na_lam, &na[np]); + rustsecp256k1_v0_10_0_scalar_split_lambda(&na_1, &na_lam, &na[np]); /* build wnaf representation for na_1 and na_lam. */ - state->ps[no].bits_na_1 = rustsecp256k1_v0_9_2_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &na_1, WINDOW_A); - state->ps[no].bits_na_lam = rustsecp256k1_v0_9_2_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &na_lam, WINDOW_A); + state->ps[no].bits_na_1 = rustsecp256k1_v0_10_0_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &na_1, WINDOW_A); + state->ps[no].bits_na_lam = rustsecp256k1_v0_10_0_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &na_lam, WINDOW_A); VERIFY_CHECK(state->ps[no].bits_na_1 <= 129); VERIFY_CHECK(state->ps[no].bits_na_lam <= 129); if (state->ps[no].bits_na_1 > bits) { @@ -274,37 +274,37 @@ static void rustsecp256k1_v0_9_2_ecmult_strauss_wnaf(const struct rustsecp256k1_ * the Z coordinate of the result once at the end. * The exception is the precomputed G table points, which are actually * affine. Compared to the base used for other points, they have a Z ratio - * of 1/Z, so we can use rustsecp256k1_v0_9_2_gej_add_zinv_var, which uses the same + * of 1/Z, so we can use rustsecp256k1_v0_10_0_gej_add_zinv_var, which uses the same * isomorphism to efficiently add with a known Z inverse. */ tmp = a[np]; if (no) { - rustsecp256k1_v0_9_2_gej_rescale(&tmp, &Z); + rustsecp256k1_v0_10_0_gej_rescale(&tmp, &Z); } - rustsecp256k1_v0_9_2_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->pre_a + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &Z, &tmp); - if (no) rustsecp256k1_v0_9_2_fe_mul(state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &(a[np].z)); + rustsecp256k1_v0_10_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->pre_a + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &Z, &tmp); + if (no) rustsecp256k1_v0_10_0_fe_mul(state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &(a[np].z)); ++no; } /* Bring them to the same Z denominator. 
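The lambda split above, like the 2^128 split of `ng` in the next hunk, trades one full-width scalar for two half-width ones so both halves can share the doublings of a single joint loop. The same shape is easy to check with a 2^m split over a toy additive group (all constants here are arbitrary stand-ins):

```c
#include <assert.h>
#include <stdint.h>

#define ORDER 1000003u  /* toy group order (prime), stand-in for the curve order */
#define G     7u        /* toy generator in the additive group Z_ORDER */

static uint32_t mul(uint64_t k, uint32_t p) { return (uint32_t)((k * p) % ORDER); }

int main(void) {
    uint32_t k = 987654u;
    /* Split k = k_lo + k_hi * 2^10, mirroring scalar_split_128's
     * k = k1 + k2 * 2^128: both halves are half-width. */
    uint32_t k_lo = k & 0x3FFu;
    uint32_t k_hi = k >> 10;
    /* With a second precomputed table for (2^10)*G, both halves are
     * processed over roughly half as many bit positions. */
    uint32_t g_shift = mul(1u << 10, G);  /* precomputed 2^10 * G */
    uint32_t lhs = (mul(k_lo, G) + mul(k_hi, g_shift)) % ORDER;
    assert(lhs == mul(k, G));
    return 0;
}
```

The lambda split plays the same role for the per-point tables, except the "shifted" table comes almost for free from the curve endomorphism (the BETA multiplication of the x coordinates in the next hunk).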
*/ if (no) { - rustsecp256k1_v0_9_2_ge_table_set_globalz(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, state->aux); + rustsecp256k1_v0_10_0_ge_table_set_globalz(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, state->aux); } for (np = 0; np < no; ++np) { for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - rustsecp256k1_v0_9_2_fe_mul(&state->aux[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i].x, &rustsecp256k1_v0_9_2_const_beta); + rustsecp256k1_v0_10_0_fe_mul(&state->aux[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i].x, &rustsecp256k1_v0_10_0_const_beta); } } if (ng) { /* split ng into ng_1 and ng_128 (where gn = gn_1 + gn_128*2^128, and gn_1 and gn_128 are ~128 bit) */ - rustsecp256k1_v0_9_2_scalar_split_128(&ng_1, &ng_128, ng); + rustsecp256k1_v0_10_0_scalar_split_128(&ng_1, &ng_128, ng); /* Build wnaf representation for ng_1 and ng_128 */ - bits_ng_1 = rustsecp256k1_v0_9_2_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G); - bits_ng_128 = rustsecp256k1_v0_9_2_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G); + bits_ng_1 = rustsecp256k1_v0_10_0_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G); + bits_ng_128 = rustsecp256k1_v0_10_0_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G); if (bits_ng_1 > bits) { bits = bits_ng_1; } @@ -313,61 +313,61 @@ static void rustsecp256k1_v0_9_2_ecmult_strauss_wnaf(const struct rustsecp256k1_ } } - rustsecp256k1_v0_9_2_gej_set_infinity(r); + rustsecp256k1_v0_10_0_gej_set_infinity(r); for (i = bits - 1; i >= 0; i--) { int n; - rustsecp256k1_v0_9_2_gej_double_var(r, r, NULL); + rustsecp256k1_v0_10_0_gej_double_var(r, r, NULL); for (np = 0; np < no; ++np) { if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) { - rustsecp256k1_v0_9_2_ecmult_table_get_ge(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); - rustsecp256k1_v0_9_2_gej_add_ge_var(r, r, &tmpa, NULL); + rustsecp256k1_v0_10_0_ecmult_table_get_ge(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); + rustsecp256k1_v0_10_0_gej_add_ge_var(r, r, &tmpa, NULL); } if (i < state->ps[np].bits_na_lam && (n = state->ps[np].wnaf_na_lam[i])) { - rustsecp256k1_v0_9_2_ecmult_table_get_ge_lambda(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); - rustsecp256k1_v0_9_2_gej_add_ge_var(r, r, &tmpa, NULL); + rustsecp256k1_v0_10_0_ecmult_table_get_ge_lambda(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); + rustsecp256k1_v0_10_0_gej_add_ge_var(r, r, &tmpa, NULL); } } if (i < bits_ng_1 && (n = wnaf_ng_1[i])) { - rustsecp256k1_v0_9_2_ecmult_table_get_ge_storage(&tmpa, rustsecp256k1_v0_9_2_pre_g, n, WINDOW_G); - rustsecp256k1_v0_9_2_gej_add_zinv_var(r, r, &tmpa, &Z); + rustsecp256k1_v0_10_0_ecmult_table_get_ge_storage(&tmpa, rustsecp256k1_v0_10_0_pre_g, n, WINDOW_G); + rustsecp256k1_v0_10_0_gej_add_zinv_var(r, r, &tmpa, &Z); } if (i < bits_ng_128 && (n = wnaf_ng_128[i])) { - rustsecp256k1_v0_9_2_ecmult_table_get_ge_storage(&tmpa, rustsecp256k1_v0_9_2_pre_g_128, n, WINDOW_G); - rustsecp256k1_v0_9_2_gej_add_zinv_var(r, r, &tmpa, &Z); + rustsecp256k1_v0_10_0_ecmult_table_get_ge_storage(&tmpa, rustsecp256k1_v0_10_0_pre_g_128, n, WINDOW_G); + rustsecp256k1_v0_10_0_gej_add_zinv_var(r, r, &tmpa, &Z); } } if (!r->infinity) { - rustsecp256k1_v0_9_2_fe_mul(&r->z, &r->z, &Z); + rustsecp256k1_v0_10_0_fe_mul(&r->z, &r->z, &Z); } } -static void 
rustsecp256k1_v0_9_2_ecmult(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_scalar *na, const rustsecp256k1_v0_9_2_scalar *ng) { - rustsecp256k1_v0_9_2_fe aux[ECMULT_TABLE_SIZE(WINDOW_A)]; - rustsecp256k1_v0_9_2_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; - struct rustsecp256k1_v0_9_2_strauss_point_state ps[1]; - struct rustsecp256k1_v0_9_2_strauss_state state; +static void rustsecp256k1_v0_10_0_ecmult(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_scalar *na, const rustsecp256k1_v0_10_0_scalar *ng) { + rustsecp256k1_v0_10_0_fe aux[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_10_0_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; + struct rustsecp256k1_v0_10_0_strauss_point_state ps[1]; + struct rustsecp256k1_v0_10_0_strauss_state state; state.aux = aux; state.pre_a = pre_a; state.ps = ps; - rustsecp256k1_v0_9_2_ecmult_strauss_wnaf(&state, r, 1, a, na, ng); + rustsecp256k1_v0_10_0_ecmult_strauss_wnaf(&state, r, 1, a, na, ng); } -static size_t rustsecp256k1_v0_9_2_strauss_scratch_size(size_t n_points) { - static const size_t point_size = (sizeof(rustsecp256k1_v0_9_2_ge) + sizeof(rustsecp256k1_v0_9_2_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct rustsecp256k1_v0_9_2_strauss_point_state) + sizeof(rustsecp256k1_v0_9_2_gej) + sizeof(rustsecp256k1_v0_9_2_scalar); +static size_t rustsecp256k1_v0_10_0_strauss_scratch_size(size_t n_points) { + static const size_t point_size = (sizeof(rustsecp256k1_v0_10_0_ge) + sizeof(rustsecp256k1_v0_10_0_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct rustsecp256k1_v0_10_0_strauss_point_state) + sizeof(rustsecp256k1_v0_10_0_gej) + sizeof(rustsecp256k1_v0_10_0_scalar); return n_points*point_size; } -static int rustsecp256k1_v0_9_2_ecmult_strauss_batch(const rustsecp256k1_v0_9_2_callback* error_callback, rustsecp256k1_v0_9_2_scratch *scratch, rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_scalar *inp_g_sc, rustsecp256k1_v0_9_2_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { - rustsecp256k1_v0_9_2_gej* points; - rustsecp256k1_v0_9_2_scalar* scalars; - struct rustsecp256k1_v0_9_2_strauss_state state; +static int rustsecp256k1_v0_10_0_ecmult_strauss_batch(const rustsecp256k1_v0_10_0_callback* error_callback, rustsecp256k1_v0_10_0_scratch *scratch, rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_scalar *inp_g_sc, rustsecp256k1_v0_10_0_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { + rustsecp256k1_v0_10_0_gej* points; + rustsecp256k1_v0_10_0_scalar* scalars; + struct rustsecp256k1_v0_10_0_strauss_state state; size_t i; - const size_t scratch_checkpoint = rustsecp256k1_v0_9_2_scratch_checkpoint(error_callback, scratch); + const size_t scratch_checkpoint = rustsecp256k1_v0_10_0_scratch_checkpoint(error_callback, scratch); - rustsecp256k1_v0_9_2_gej_set_infinity(r); + rustsecp256k1_v0_10_0_gej_set_infinity(r); if (inp_g_sc == NULL && n_points == 0) { return 1; } @@ -375,37 +375,37 @@ static int rustsecp256k1_v0_9_2_ecmult_strauss_batch(const rustsecp256k1_v0_9_2_ /* We allocate STRAUSS_SCRATCH_OBJECTS objects on the scratch space. If these * allocations change, make sure to update the STRAUSS_SCRATCH_OBJECTS * constant and strauss_scratch_size accordingly. 
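Every failure path in `strauss_batch` below unwinds its scratch allocations with a single `scratch_apply_checkpoint` call. The underlying pattern is a bump allocator whose checkpoint is just the current offset; a toy version, ignoring the alignment rounding and error callback the real scratch space carries (names are illustrative):

```c
#include <assert.h>
#include <stddef.h>

/* A bump allocator with checkpoint/rollback: take a checkpoint, make
 * several allocations, and undo them all in one step on any failure. */
typedef struct { unsigned char buf[1024]; size_t used; } toy_scratch;

static size_t toy_checkpoint(const toy_scratch *s) { return s->used; }

static void *toy_alloc(toy_scratch *s, size_t n) {
    void *p;
    if (n > sizeof(s->buf) - s->used) return NULL;  /* out of space */
    p = s->buf + s->used;
    s->used += n;
    return p;
}

static void toy_apply_checkpoint(toy_scratch *s, size_t cp) { s->used = cp; }

int main(void) {
    toy_scratch s = {{0}, 0};
    size_t cp = toy_checkpoint(&s);
    void *a = toy_alloc(&s, 100);
    void *b = toy_alloc(&s, 2000);     /* too big: fails */
    if (a == NULL || b == NULL) {
        toy_apply_checkpoint(&s, cp);  /* one rollback frees everything */
    }
    assert(s.used == 0);
    return 0;
}
```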
*/ - points = (rustsecp256k1_v0_9_2_gej*)rustsecp256k1_v0_9_2_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_9_2_gej)); - scalars = (rustsecp256k1_v0_9_2_scalar*)rustsecp256k1_v0_9_2_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_9_2_scalar)); - state.aux = (rustsecp256k1_v0_9_2_fe*)rustsecp256k1_v0_9_2_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_9_2_fe)); - state.pre_a = (rustsecp256k1_v0_9_2_ge*)rustsecp256k1_v0_9_2_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_9_2_ge)); - state.ps = (struct rustsecp256k1_v0_9_2_strauss_point_state*)rustsecp256k1_v0_9_2_scratch_alloc(error_callback, scratch, n_points * sizeof(struct rustsecp256k1_v0_9_2_strauss_point_state)); + points = (rustsecp256k1_v0_10_0_gej*)rustsecp256k1_v0_10_0_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_10_0_gej)); + scalars = (rustsecp256k1_v0_10_0_scalar*)rustsecp256k1_v0_10_0_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_10_0_scalar)); + state.aux = (rustsecp256k1_v0_10_0_fe*)rustsecp256k1_v0_10_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_10_0_fe)); + state.pre_a = (rustsecp256k1_v0_10_0_ge*)rustsecp256k1_v0_10_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_10_0_ge)); + state.ps = (struct rustsecp256k1_v0_10_0_strauss_point_state*)rustsecp256k1_v0_10_0_scratch_alloc(error_callback, scratch, n_points * sizeof(struct rustsecp256k1_v0_10_0_strauss_point_state)); if (points == NULL || scalars == NULL || state.aux == NULL || state.pre_a == NULL || state.ps == NULL) { - rustsecp256k1_v0_9_2_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_10_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } for (i = 0; i < n_points; i++) { - rustsecp256k1_v0_9_2_ge point; + rustsecp256k1_v0_10_0_ge point; if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) { - rustsecp256k1_v0_9_2_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_10_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } - rustsecp256k1_v0_9_2_gej_set_ge(&points[i], &point); + rustsecp256k1_v0_10_0_gej_set_ge(&points[i], &point); } - rustsecp256k1_v0_9_2_ecmult_strauss_wnaf(&state, r, n_points, points, scalars, inp_g_sc); - rustsecp256k1_v0_9_2_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_10_0_ecmult_strauss_wnaf(&state, r, n_points, points, scalars, inp_g_sc); + rustsecp256k1_v0_10_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 1; } -/* Wrapper for rustsecp256k1_v0_9_2_ecmult_multi_func interface */ -static int rustsecp256k1_v0_9_2_ecmult_strauss_batch_single(const rustsecp256k1_v0_9_2_callback* error_callback, rustsecp256k1_v0_9_2_scratch *scratch, rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_scalar *inp_g_sc, rustsecp256k1_v0_9_2_ecmult_multi_callback cb, void *cbdata, size_t n) { - return rustsecp256k1_v0_9_2_ecmult_strauss_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0); +/* Wrapper for rustsecp256k1_v0_10_0_ecmult_multi_func interface */ +static int rustsecp256k1_v0_10_0_ecmult_strauss_batch_single(const rustsecp256k1_v0_10_0_callback* error_callback, rustsecp256k1_v0_10_0_scratch 
*scratch, rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_scalar *inp_g_sc, rustsecp256k1_v0_10_0_ecmult_multi_callback cb, void *cbdata, size_t n) { + return rustsecp256k1_v0_10_0_ecmult_strauss_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0); } -static size_t rustsecp256k1_v0_9_2_strauss_max_points(const rustsecp256k1_v0_9_2_callback* error_callback, rustsecp256k1_v0_9_2_scratch *scratch) { - return rustsecp256k1_v0_9_2_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / rustsecp256k1_v0_9_2_strauss_scratch_size(1); +static size_t rustsecp256k1_v0_10_0_strauss_max_points(const rustsecp256k1_v0_10_0_callback* error_callback, rustsecp256k1_v0_10_0_scratch *scratch) { + return rustsecp256k1_v0_10_0_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / rustsecp256k1_v0_10_0_strauss_scratch_size(1); } /** Convert a number to WNAF notation. @@ -415,25 +415,25 @@ static size_t rustsecp256k1_v0_9_2_strauss_max_points(const rustsecp256k1_v0_9_2 * - the number of words set is always WNAF_SIZE(w) * - the returned skew is 0 or 1 */ -static int rustsecp256k1_v0_9_2_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_9_2_scalar *s, int w) { +static int rustsecp256k1_v0_10_0_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_10_0_scalar *s, int w) { int skew = 0; int pos; int max_pos; int last_w; - const rustsecp256k1_v0_9_2_scalar *work = s; + const rustsecp256k1_v0_10_0_scalar *work = s; - if (rustsecp256k1_v0_9_2_scalar_is_zero(s)) { + if (rustsecp256k1_v0_10_0_scalar_is_zero(s)) { for (pos = 0; pos < WNAF_SIZE(w); pos++) { wnaf[pos] = 0; } return 0; } - if (rustsecp256k1_v0_9_2_scalar_is_even(s)) { + if (rustsecp256k1_v0_10_0_scalar_is_even(s)) { skew = 1; } - wnaf[0] = rustsecp256k1_v0_9_2_scalar_get_bits_var(work, 0, w) + skew; + wnaf[0] = rustsecp256k1_v0_10_0_scalar_get_bits_var(work, 0, w) + skew; /* Compute last window size. Relevant when window size doesn't divide the * number of bits in the scalar */ last_w = WNAF_BITS - (WNAF_SIZE(w) - 1) * w; @@ -441,7 +441,7 @@ static int rustsecp256k1_v0_9_2_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_9_2 /* Store the position of the first nonzero word in max_pos to allow * skipping leading zeros when calculating the wnaf. */ for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) { - int val = rustsecp256k1_v0_9_2_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); + int val = rustsecp256k1_v0_10_0_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); if(val != 0) { break; } @@ -451,7 +451,7 @@ static int rustsecp256k1_v0_9_2_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_9_2 pos = 1; while (pos <= max_pos) { - int val = rustsecp256k1_v0_9_2_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); + int val = rustsecp256k1_v0_10_0_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); if ((val & 1) == 0) { wnaf[pos - 1] -= (1 << w); wnaf[pos] = (val + 1); @@ -477,14 +477,14 @@ static int rustsecp256k1_v0_9_2_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_9_2 return skew; } -struct rustsecp256k1_v0_9_2_pippenger_point_state { +struct rustsecp256k1_v0_10_0_pippenger_point_state { int skew_na; size_t input_pos; }; -struct rustsecp256k1_v0_9_2_pippenger_state { +struct rustsecp256k1_v0_10_0_pippenger_state { int *wnaf_na; - struct rustsecp256k1_v0_9_2_pippenger_point_state* ps; + struct rustsecp256k1_v0_10_0_pippenger_point_state* ps; }; /* @@ -494,7 +494,7 @@ struct rustsecp256k1_v0_9_2_pippenger_state { * to the point's wnaf[i]. 
Second, the buckets are added together such that * r += 1*bucket[0] + 3*bucket[1] + 5*bucket[2] + ... */ -static int rustsecp256k1_v0_9_2_ecmult_pippenger_wnaf(rustsecp256k1_v0_9_2_gej *buckets, int bucket_window, struct rustsecp256k1_v0_9_2_pippenger_state *state, rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_scalar *sc, const rustsecp256k1_v0_9_2_ge *pt, size_t num) { +static int rustsecp256k1_v0_10_0_ecmult_pippenger_wnaf(rustsecp256k1_v0_10_0_gej *buckets, int bucket_window, struct rustsecp256k1_v0_10_0_pippenger_state *state, rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_scalar *sc, const rustsecp256k1_v0_10_0_ge *pt, size_t num) { size_t n_wnaf = WNAF_SIZE(bucket_window+1); size_t np; size_t no = 0; @@ -502,55 +502,55 @@ static int rustsecp256k1_v0_9_2_ecmult_pippenger_wnaf(rustsecp256k1_v0_9_2_gej * int j; for (np = 0; np < num; ++np) { - if (rustsecp256k1_v0_9_2_scalar_is_zero(&sc[np]) || rustsecp256k1_v0_9_2_ge_is_infinity(&pt[np])) { + if (rustsecp256k1_v0_10_0_scalar_is_zero(&sc[np]) || rustsecp256k1_v0_10_0_ge_is_infinity(&pt[np])) { continue; } state->ps[no].input_pos = np; - state->ps[no].skew_na = rustsecp256k1_v0_9_2_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1); + state->ps[no].skew_na = rustsecp256k1_v0_10_0_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1); no++; } - rustsecp256k1_v0_9_2_gej_set_infinity(r); + rustsecp256k1_v0_10_0_gej_set_infinity(r); if (no == 0) { return 1; } for (i = n_wnaf - 1; i >= 0; i--) { - rustsecp256k1_v0_9_2_gej running_sum; + rustsecp256k1_v0_10_0_gej running_sum; for(j = 0; j < ECMULT_TABLE_SIZE(bucket_window+2); j++) { - rustsecp256k1_v0_9_2_gej_set_infinity(&buckets[j]); + rustsecp256k1_v0_10_0_gej_set_infinity(&buckets[j]); } for (np = 0; np < no; ++np) { int n = state->wnaf_na[np*n_wnaf + i]; - struct rustsecp256k1_v0_9_2_pippenger_point_state point_state = state->ps[np]; - rustsecp256k1_v0_9_2_ge tmp; + struct rustsecp256k1_v0_10_0_pippenger_point_state point_state = state->ps[np]; + rustsecp256k1_v0_10_0_ge tmp; int idx; if (i == 0) { /* correct for wnaf skew */ int skew = point_state.skew_na; if (skew) { - rustsecp256k1_v0_9_2_ge_neg(&tmp, &pt[point_state.input_pos]); - rustsecp256k1_v0_9_2_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL); + rustsecp256k1_v0_10_0_ge_neg(&tmp, &pt[point_state.input_pos]); + rustsecp256k1_v0_10_0_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL); } } if (n > 0) { idx = (n - 1)/2; - rustsecp256k1_v0_9_2_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL); + rustsecp256k1_v0_10_0_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL); } else if (n < 0) { idx = -(n + 1)/2; - rustsecp256k1_v0_9_2_ge_neg(&tmp, &pt[point_state.input_pos]); - rustsecp256k1_v0_9_2_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL); + rustsecp256k1_v0_10_0_ge_neg(&tmp, &pt[point_state.input_pos]); + rustsecp256k1_v0_10_0_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL); } } for(j = 0; j < bucket_window; j++) { - rustsecp256k1_v0_9_2_gej_double_var(r, r, NULL); + rustsecp256k1_v0_10_0_gej_double_var(r, r, NULL); } - rustsecp256k1_v0_9_2_gej_set_infinity(&running_sum); + rustsecp256k1_v0_10_0_gej_set_infinity(&running_sum); /* Accumulate the sum: bucket[0] + 3*bucket[1] + 5*bucket[2] + 7*bucket[3] + ... * = bucket[0] + bucket[1] + bucket[2] + bucket[3] + ... * + 2 * (bucket[1] + 2*bucket[2] + 3*bucket[3] + ...) 
@@ -560,13 +560,13 @@ static int rustsecp256k1_v0_9_2_ecmult_pippenger_wnaf(rustsecp256k1_v0_9_2_gej * * The doubling is done implicitly by deferring the final window doubling (of 'r'). */ for(j = ECMULT_TABLE_SIZE(bucket_window+2) - 1; j > 0; j--) { - rustsecp256k1_v0_9_2_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL); - rustsecp256k1_v0_9_2_gej_add_var(r, r, &running_sum, NULL); + rustsecp256k1_v0_10_0_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL); + rustsecp256k1_v0_10_0_gej_add_var(r, r, &running_sum, NULL); } - rustsecp256k1_v0_9_2_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL); - rustsecp256k1_v0_9_2_gej_double_var(r, r, NULL); - rustsecp256k1_v0_9_2_gej_add_var(r, r, &running_sum, NULL); + rustsecp256k1_v0_10_0_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL); + rustsecp256k1_v0_10_0_gej_double_var(r, r, NULL); + rustsecp256k1_v0_10_0_gej_add_var(r, r, &running_sum, NULL); } return 1; } @@ -575,7 +575,7 @@ static int rustsecp256k1_v0_9_2_ecmult_pippenger_wnaf(rustsecp256k1_v0_9_2_gej * * Returns optimal bucket_window (number of bits of a scalar represented by a * set of buckets) for a given number of points. */ -static int rustsecp256k1_v0_9_2_pippenger_bucket_window(size_t n) { +static int rustsecp256k1_v0_10_0_pippenger_bucket_window(size_t n) { if (n <= 1) { return 1; } else if (n <= 4) { @@ -604,7 +604,7 @@ static int rustsecp256k1_v0_9_2_pippenger_bucket_window(size_t n) { /** * Returns the maximum optimal number of points for a bucket_window. */ -static size_t rustsecp256k1_v0_9_2_pippenger_bucket_window_inv(int bucket_window) { +static size_t rustsecp256k1_v0_10_0_pippenger_bucket_window_inv(int bucket_window) { switch(bucket_window) { case 1: return 1; case 2: return 4; @@ -623,18 +623,18 @@ static size_t rustsecp256k1_v0_9_2_pippenger_bucket_window_inv(int bucket_window } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_ecmult_endo_split(rustsecp256k1_v0_9_2_scalar *s1, rustsecp256k1_v0_9_2_scalar *s2, rustsecp256k1_v0_9_2_ge *p1, rustsecp256k1_v0_9_2_ge *p2) { - rustsecp256k1_v0_9_2_scalar tmp = *s1; - rustsecp256k1_v0_9_2_scalar_split_lambda(s1, s2, &tmp); - rustsecp256k1_v0_9_2_ge_mul_lambda(p2, p1); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_ecmult_endo_split(rustsecp256k1_v0_10_0_scalar *s1, rustsecp256k1_v0_10_0_scalar *s2, rustsecp256k1_v0_10_0_ge *p1, rustsecp256k1_v0_10_0_ge *p2) { + rustsecp256k1_v0_10_0_scalar tmp = *s1; + rustsecp256k1_v0_10_0_scalar_split_lambda(s1, s2, &tmp); + rustsecp256k1_v0_10_0_ge_mul_lambda(p2, p1); - if (rustsecp256k1_v0_9_2_scalar_is_high(s1)) { - rustsecp256k1_v0_9_2_scalar_negate(s1, s1); - rustsecp256k1_v0_9_2_ge_neg(p1, p1); + if (rustsecp256k1_v0_10_0_scalar_is_high(s1)) { + rustsecp256k1_v0_10_0_scalar_negate(s1, s1); + rustsecp256k1_v0_10_0_ge_neg(p1, p1); } - if (rustsecp256k1_v0_9_2_scalar_is_high(s2)) { - rustsecp256k1_v0_9_2_scalar_negate(s2, s2); - rustsecp256k1_v0_9_2_ge_neg(p2, p2); + if (rustsecp256k1_v0_10_0_scalar_is_high(s2)) { + rustsecp256k1_v0_10_0_scalar_negate(s2, s2); + rustsecp256k1_v0_10_0_ge_neg(p2, p2); } } @@ -642,91 +642,91 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_ecmult_endo_split(rustsecp256k * Returns the scratch size required for a given number of points (excluding * base point G) without considering alignment. 
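The running-sum loop above evaluates sum((2j+1) * bucket[j]) without any scalar multiplications: accumulating suffix sums into r contributes j*bucket[j] in total, and the final double-and-add supplies the remaining weight, since 2*(sum j*bucket[j]) + sum bucket[j] = sum (2j+1)*bucket[j]. One window of the whole bucket method, verified over plain integers (toy digits and points):

```c
#include <assert.h>

/* Each "point" p[i] with odd signed digit d[i] lands in bucket (|d[i]|-1)/2,
 * negated when d[i] < 0; the weighted total is then formed with the same
 * running-sum recurrence as in the vendored code. */
int main(void) {
    long p[4] = {5, 9, 11, 20};
    int  d[4] = {3, -1, 7, 5};       /* odd signed digits, |d| <= 7 */
    long buckets[4] = {0, 0, 0, 0};  /* bucket[j] collects digit 2j+1 */
    long running_sum = 0, r = 0, expected = 0;
    int i, j;

    for (i = 0; i < 4; i++) {
        int n = d[i];
        if (n > 0) buckets[(n - 1) / 2] += p[i];
        else       buckets[(-n - 1) / 2] -= p[i];
        expected += (long)d[i] * p[i];
    }
    /* r = sum((2j+1) * bucket[j]) via cumulative sums: */
    for (j = 3; j > 0; j--) {
        running_sum += buckets[j];  /* suffix sum of buckets[j..] */
        r += running_sum;           /* accumulates j * bucket[j] overall */
    }
    running_sum += buckets[0];      /* now the sum of all buckets */
    r = 2 * r + running_sum;        /* completes the (2j+1) weights */
    assert(r == expected);
    return 0;
}
```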
*/ -static size_t rustsecp256k1_v0_9_2_pippenger_scratch_size(size_t n_points, int bucket_window) { +static size_t rustsecp256k1_v0_10_0_pippenger_scratch_size(size_t n_points, int bucket_window) { size_t entries = 2*n_points + 2; - size_t entry_size = sizeof(rustsecp256k1_v0_9_2_ge) + sizeof(rustsecp256k1_v0_9_2_scalar) + sizeof(struct rustsecp256k1_v0_9_2_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int); - return (sizeof(rustsecp256k1_v0_9_2_gej) << bucket_window) + sizeof(struct rustsecp256k1_v0_9_2_pippenger_state) + entries * entry_size; + size_t entry_size = sizeof(rustsecp256k1_v0_10_0_ge) + sizeof(rustsecp256k1_v0_10_0_scalar) + sizeof(struct rustsecp256k1_v0_10_0_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int); + return (sizeof(rustsecp256k1_v0_10_0_gej) << bucket_window) + sizeof(struct rustsecp256k1_v0_10_0_pippenger_state) + entries * entry_size; } -static int rustsecp256k1_v0_9_2_ecmult_pippenger_batch(const rustsecp256k1_v0_9_2_callback* error_callback, rustsecp256k1_v0_9_2_scratch *scratch, rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_scalar *inp_g_sc, rustsecp256k1_v0_9_2_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { - const size_t scratch_checkpoint = rustsecp256k1_v0_9_2_scratch_checkpoint(error_callback, scratch); +static int rustsecp256k1_v0_10_0_ecmult_pippenger_batch(const rustsecp256k1_v0_10_0_callback* error_callback, rustsecp256k1_v0_10_0_scratch *scratch, rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_scalar *inp_g_sc, rustsecp256k1_v0_10_0_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { + const size_t scratch_checkpoint = rustsecp256k1_v0_10_0_scratch_checkpoint(error_callback, scratch); /* Use 2(n+1) with the endomorphism, when calculating batch * sizes. The reason for +1 is that we add the G scalar to the list of * other scalars. */ size_t entries = 2*n_points + 2; - rustsecp256k1_v0_9_2_ge *points; - rustsecp256k1_v0_9_2_scalar *scalars; - rustsecp256k1_v0_9_2_gej *buckets; - struct rustsecp256k1_v0_9_2_pippenger_state *state_space; + rustsecp256k1_v0_10_0_ge *points; + rustsecp256k1_v0_10_0_scalar *scalars; + rustsecp256k1_v0_10_0_gej *buckets; + struct rustsecp256k1_v0_10_0_pippenger_state *state_space; size_t idx = 0; size_t point_idx = 0; int i, j; int bucket_window; - rustsecp256k1_v0_9_2_gej_set_infinity(r); + rustsecp256k1_v0_10_0_gej_set_infinity(r); if (inp_g_sc == NULL && n_points == 0) { return 1; } - bucket_window = rustsecp256k1_v0_9_2_pippenger_bucket_window(n_points); + bucket_window = rustsecp256k1_v0_10_0_pippenger_bucket_window(n_points); /* We allocate PIPPENGER_SCRATCH_OBJECTS objects on the scratch space. If * these allocations change, make sure to update the * PIPPENGER_SCRATCH_OBJECTS constant and pippenger_scratch_size * accordingly. 
*/ - points = (rustsecp256k1_v0_9_2_ge *) rustsecp256k1_v0_9_2_scratch_alloc(error_callback, scratch, entries * sizeof(*points)); - scalars = (rustsecp256k1_v0_9_2_scalar *) rustsecp256k1_v0_9_2_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars)); - state_space = (struct rustsecp256k1_v0_9_2_pippenger_state *) rustsecp256k1_v0_9_2_scratch_alloc(error_callback, scratch, sizeof(*state_space)); + points = (rustsecp256k1_v0_10_0_ge *) rustsecp256k1_v0_10_0_scratch_alloc(error_callback, scratch, entries * sizeof(*points)); + scalars = (rustsecp256k1_v0_10_0_scalar *) rustsecp256k1_v0_10_0_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars)); + state_space = (struct rustsecp256k1_v0_10_0_pippenger_state *) rustsecp256k1_v0_10_0_scratch_alloc(error_callback, scratch, sizeof(*state_space)); if (points == NULL || scalars == NULL || state_space == NULL) { - rustsecp256k1_v0_9_2_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_10_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } - state_space->ps = (struct rustsecp256k1_v0_9_2_pippenger_point_state *) rustsecp256k1_v0_9_2_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps)); - state_space->wnaf_na = (int *) rustsecp256k1_v0_9_2_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int)); - buckets = (rustsecp256k1_v0_9_2_gej *) rustsecp256k1_v0_9_2_scratch_alloc(error_callback, scratch, ((size_t)1 << bucket_window) * sizeof(*buckets)); + state_space->ps = (struct rustsecp256k1_v0_10_0_pippenger_point_state *) rustsecp256k1_v0_10_0_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps)); + state_space->wnaf_na = (int *) rustsecp256k1_v0_10_0_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int)); + buckets = (rustsecp256k1_v0_10_0_gej *) rustsecp256k1_v0_10_0_scratch_alloc(error_callback, scratch, ((size_t)1 << bucket_window) * sizeof(*buckets)); if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) { - rustsecp256k1_v0_9_2_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_10_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } if (inp_g_sc != NULL) { scalars[0] = *inp_g_sc; - points[0] = rustsecp256k1_v0_9_2_ge_const_g; + points[0] = rustsecp256k1_v0_10_0_ge_const_g; idx++; - rustsecp256k1_v0_9_2_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]); + rustsecp256k1_v0_10_0_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]); idx++; } while (point_idx < n_points) { if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) { - rustsecp256k1_v0_9_2_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_10_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } idx++; - rustsecp256k1_v0_9_2_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]); + rustsecp256k1_v0_10_0_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]); idx++; point_idx++; } - rustsecp256k1_v0_9_2_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx); + rustsecp256k1_v0_10_0_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx); /* Clear data */ for(i = 0; (size_t)i < idx; i++) { - rustsecp256k1_v0_9_2_scalar_clear(&scalars[i]); + 
rustsecp256k1_v0_10_0_scalar_clear(&scalars[i]); state_space->ps[i].skew_na = 0; for(j = 0; j < WNAF_SIZE(bucket_window+1); j++) { state_space->wnaf_na[i * WNAF_SIZE(bucket_window+1) + j] = 0; } } for(i = 0; i < 1< max_alloc) { break; } @@ -770,32 +770,32 @@ static size_t rustsecp256k1_v0_9_2_pippenger_max_points(const rustsecp256k1_v0_9 /* Computes ecmult_multi by simply multiplying and adding each point. Does not * require a scratch space */ -static int rustsecp256k1_v0_9_2_ecmult_multi_simple_var(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_scalar *inp_g_sc, rustsecp256k1_v0_9_2_ecmult_multi_callback cb, void *cbdata, size_t n_points) { +static int rustsecp256k1_v0_10_0_ecmult_multi_simple_var(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_scalar *inp_g_sc, rustsecp256k1_v0_10_0_ecmult_multi_callback cb, void *cbdata, size_t n_points) { size_t point_idx; - rustsecp256k1_v0_9_2_gej tmpj; + rustsecp256k1_v0_10_0_gej tmpj; - rustsecp256k1_v0_9_2_gej_set_infinity(r); - rustsecp256k1_v0_9_2_gej_set_infinity(&tmpj); + rustsecp256k1_v0_10_0_gej_set_infinity(r); + rustsecp256k1_v0_10_0_gej_set_infinity(&tmpj); /* r = inp_g_sc*G */ - rustsecp256k1_v0_9_2_ecmult(r, &tmpj, &rustsecp256k1_v0_9_2_scalar_zero, inp_g_sc); + rustsecp256k1_v0_10_0_ecmult(r, &tmpj, &rustsecp256k1_v0_10_0_scalar_zero, inp_g_sc); for (point_idx = 0; point_idx < n_points; point_idx++) { - rustsecp256k1_v0_9_2_ge point; - rustsecp256k1_v0_9_2_gej pointj; - rustsecp256k1_v0_9_2_scalar scalar; + rustsecp256k1_v0_10_0_ge point; + rustsecp256k1_v0_10_0_gej pointj; + rustsecp256k1_v0_10_0_scalar scalar; if (!cb(&scalar, &point, point_idx, cbdata)) { return 0; } /* r += scalar*point */ - rustsecp256k1_v0_9_2_gej_set_ge(&pointj, &point); - rustsecp256k1_v0_9_2_ecmult(&tmpj, &pointj, &scalar, NULL); - rustsecp256k1_v0_9_2_gej_add_var(r, r, &tmpj, NULL); + rustsecp256k1_v0_10_0_gej_set_ge(&pointj, &point); + rustsecp256k1_v0_10_0_ecmult(&tmpj, &pointj, &scalar, NULL); + rustsecp256k1_v0_10_0_gej_add_var(r, r, &tmpj, NULL); } return 1; } /* Compute the number of batches and the batch size given the maximum batch size and the * total number of points */ -static int rustsecp256k1_v0_9_2_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) { +static int rustsecp256k1_v0_10_0_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) { if (max_n_batch_points == 0) { return 0; } @@ -813,48 +813,48 @@ static int rustsecp256k1_v0_9_2_ecmult_multi_batch_size_helper(size_t *n_batches return 1; } -typedef int (*rustsecp256k1_v0_9_2_ecmult_multi_func)(const rustsecp256k1_v0_9_2_callback* error_callback, rustsecp256k1_v0_9_2_scratch*, rustsecp256k1_v0_9_2_gej*, const rustsecp256k1_v0_9_2_scalar*, rustsecp256k1_v0_9_2_ecmult_multi_callback cb, void*, size_t); -static int rustsecp256k1_v0_9_2_ecmult_multi_var(const rustsecp256k1_v0_9_2_callback* error_callback, rustsecp256k1_v0_9_2_scratch *scratch, rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_scalar *inp_g_sc, rustsecp256k1_v0_9_2_ecmult_multi_callback cb, void *cbdata, size_t n) { +typedef int (*rustsecp256k1_v0_10_0_ecmult_multi_func)(const rustsecp256k1_v0_10_0_callback* error_callback, rustsecp256k1_v0_10_0_scratch*, rustsecp256k1_v0_10_0_gej*, const rustsecp256k1_v0_10_0_scalar*, rustsecp256k1_v0_10_0_ecmult_multi_callback cb, void*, size_t); +static int rustsecp256k1_v0_10_0_ecmult_multi_var(const rustsecp256k1_v0_10_0_callback* error_callback, 
rustsecp256k1_v0_10_0_scratch *scratch, rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_scalar *inp_g_sc, rustsecp256k1_v0_10_0_ecmult_multi_callback cb, void *cbdata, size_t n) { size_t i; - int (*f)(const rustsecp256k1_v0_9_2_callback* error_callback, rustsecp256k1_v0_9_2_scratch*, rustsecp256k1_v0_9_2_gej*, const rustsecp256k1_v0_9_2_scalar*, rustsecp256k1_v0_9_2_ecmult_multi_callback cb, void*, size_t, size_t); + int (*f)(const rustsecp256k1_v0_10_0_callback* error_callback, rustsecp256k1_v0_10_0_scratch*, rustsecp256k1_v0_10_0_gej*, const rustsecp256k1_v0_10_0_scalar*, rustsecp256k1_v0_10_0_ecmult_multi_callback cb, void*, size_t, size_t); size_t n_batches; size_t n_batch_points; - rustsecp256k1_v0_9_2_gej_set_infinity(r); + rustsecp256k1_v0_10_0_gej_set_infinity(r); if (inp_g_sc == NULL && n == 0) { return 1; } else if (n == 0) { - rustsecp256k1_v0_9_2_ecmult(r, r, &rustsecp256k1_v0_9_2_scalar_zero, inp_g_sc); + rustsecp256k1_v0_10_0_ecmult(r, r, &rustsecp256k1_v0_10_0_scalar_zero, inp_g_sc); return 1; } if (scratch == NULL) { - return rustsecp256k1_v0_9_2_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n); + return rustsecp256k1_v0_10_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n); } /* Compute the batch sizes for Pippenger's algorithm given a scratch space. If it's greater than * a threshold use Pippenger's algorithm. Otherwise use Strauss' algorithm. * As a first step check if there's enough space for Pippenger's algo (which requires less space * than Strauss' algo) and if not, use the simple algorithm. */ - if (!rustsecp256k1_v0_9_2_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_9_2_pippenger_max_points(error_callback, scratch), n)) { - return rustsecp256k1_v0_9_2_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n); + if (!rustsecp256k1_v0_10_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_10_0_pippenger_max_points(error_callback, scratch), n)) { + return rustsecp256k1_v0_10_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n); } if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) { - f = rustsecp256k1_v0_9_2_ecmult_pippenger_batch; + f = rustsecp256k1_v0_10_0_ecmult_pippenger_batch; } else { - if (!rustsecp256k1_v0_9_2_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_9_2_strauss_max_points(error_callback, scratch), n)) { - return rustsecp256k1_v0_9_2_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n); + if (!rustsecp256k1_v0_10_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_10_0_strauss_max_points(error_callback, scratch), n)) { + return rustsecp256k1_v0_10_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n); } - f = rustsecp256k1_v0_9_2_ecmult_strauss_batch; + f = rustsecp256k1_v0_10_0_ecmult_strauss_batch; } for(i = 0; i < n_batches; i++) { size_t nbp = n < n_batch_points ? n : n_batch_points; size_t offset = n_batch_points*i; - rustsecp256k1_v0_9_2_gej tmp; + rustsecp256k1_v0_10_0_gej tmp; if (!f(error_callback, scratch, &tmp, i == 0 ? 
inp_g_sc : NULL, cb, cbdata, nbp, offset)) { return 0; } - rustsecp256k1_v0_9_2_gej_add_var(r, r, &tmp, NULL); + rustsecp256k1_v0_10_0_gej_add_var(r, r, &tmp, NULL); n -= nbp; } return 1; diff --git a/secp256k1-sys/depend/secp256k1/src/field.h b/secp256k1-sys/depend/secp256k1/src/field.h index ebe3c2d72..e0064615f 100644 --- a/secp256k1-sys/depend/secp256k1/src/field.h +++ b/secp256k1-sys/depend/secp256k1/src/field.h @@ -9,15 +9,15 @@ #include "util.h" -/* This file defines the generic interface for working with rustsecp256k1_v0_9_2_fe +/* This file defines the generic interface for working with rustsecp256k1_v0_10_0_fe * objects, which represent field elements (integers modulo 2^256 - 2^32 - 977). * - * The actual definition of the rustsecp256k1_v0_9_2_fe type depends on the chosen field + * The actual definition of the rustsecp256k1_v0_10_0_fe type depends on the chosen field * implementation; see the field_5x52.h and field_10x26.h files for details. * - * All rustsecp256k1_v0_9_2_fe objects have implicit properties that determine what + * All rustsecp256k1_v0_10_0_fe objects have implicit properties that determine what * operations are permitted on it. These are purely a function of what - * rustsecp256k1_v0_9_2_fe_ operations are applied on it, generally (implicitly) fixed at + * rustsecp256k1_v0_10_0_fe_ operations are applied on it, generally (implicitly) fixed at * compile time, and do not depend on the chosen field implementation. Despite * that, what these properties actually entail for the field representation * values depends on the chosen field implementation. These properties are: @@ -26,7 +26,7 @@ * * In VERIFY mode, they are materialized explicitly as fields in the struct, * allowing run-time verification of these properties. In that case, the field - * implementation also provides a rustsecp256k1_v0_9_2_fe_verify routine to verify that + * implementation also provides a rustsecp256k1_v0_10_0_fe_verify routine to verify that * these fields match the run-time value and perform internal consistency * checks. */ #ifdef VERIFY @@ -56,7 +56,7 @@ #define SECP256K1_FE_VERIFY_CONST(d7, d6, d5, d4, d3, d2, d1, d0) #endif -/** This expands to an initializer for a rustsecp256k1_v0_9_2_fe valued sum((i*32) * d_i, i=0..7) mod p. +/** This expands to an initializer for a rustsecp256k1_v0_10_0_fe valued sum((i*32) * d_i, i=0..7) mod p. * * It has magnitude 1, unless d_i are all 0, in which case the magnitude is 0. * It is normalized, unless sum(2^(i*32) * d_i, i=0..7) >= p. 
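One worked check on the batching logic vendored above, before continuing with the field.h interface: `ecmult_multi_batch_size_helper` picks the fewest batches that respect the scratch limit and then evens them out, via two ceiling divisions. A sketch of the same arithmetic, omitting the hard per-batch cap the real helper also applies:

```c
#include <assert.h>
#include <stddef.h>

/* Split n points into the fewest batches of at most max_n points each,
 * then balance the batch sizes. */
static int batch_size_helper(size_t *n_batches, size_t *n_batch_points,
                             size_t max_n, size_t n) {
    if (max_n == 0) return 0;
    if (n == 0) { *n_batches = 0; *n_batch_points = 0; return 1; }
    *n_batches = 1 + (n - 1) / max_n;           /* ceil(n / max_n) */
    *n_batch_points = 1 + (n - 1) / *n_batches; /* ceil(n / n_batches) */
    return 1;
}

int main(void) {
    size_t batches, per_batch;
    assert(batch_size_helper(&batches, &per_batch, 100, 250));
    assert(batches == 3 && per_batch == 84);  /* 84 <= 100 and 3*84 >= 250 */
    assert(batches * per_batch >= 250);
    return 0;
}
```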
@@ -65,8 +65,8 @@ */ #define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0)) SECP256K1_FE_VERIFY_CONST((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0)) } -static const rustsecp256k1_v0_9_2_fe rustsecp256k1_v0_9_2_fe_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); -static const rustsecp256k1_v0_9_2_fe rustsecp256k1_v0_9_2_const_beta = SECP256K1_FE_CONST( +static const rustsecp256k1_v0_10_0_fe rustsecp256k1_v0_10_0_fe_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); +static const rustsecp256k1_v0_10_0_fe rustsecp256k1_v0_10_0_const_beta = SECP256K1_FE_CONST( 0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul, 0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul ); @@ -75,33 +75,33 @@ static const rustsecp256k1_v0_9_2_fe rustsecp256k1_v0_9_2_const_beta = SECP256K1 /* In non-VERIFY mode, we #define the fe operations to be identical to their * internal field implementation, to avoid the potential overhead of a * function call (even though presumably inlinable). */ -# define rustsecp256k1_v0_9_2_fe_normalize rustsecp256k1_v0_9_2_fe_impl_normalize -# define rustsecp256k1_v0_9_2_fe_normalize_weak rustsecp256k1_v0_9_2_fe_impl_normalize_weak -# define rustsecp256k1_v0_9_2_fe_normalize_var rustsecp256k1_v0_9_2_fe_impl_normalize_var -# define rustsecp256k1_v0_9_2_fe_normalizes_to_zero rustsecp256k1_v0_9_2_fe_impl_normalizes_to_zero -# define rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var rustsecp256k1_v0_9_2_fe_impl_normalizes_to_zero_var -# define rustsecp256k1_v0_9_2_fe_set_int rustsecp256k1_v0_9_2_fe_impl_set_int -# define rustsecp256k1_v0_9_2_fe_clear rustsecp256k1_v0_9_2_fe_impl_clear -# define rustsecp256k1_v0_9_2_fe_is_zero rustsecp256k1_v0_9_2_fe_impl_is_zero -# define rustsecp256k1_v0_9_2_fe_is_odd rustsecp256k1_v0_9_2_fe_impl_is_odd -# define rustsecp256k1_v0_9_2_fe_cmp_var rustsecp256k1_v0_9_2_fe_impl_cmp_var -# define rustsecp256k1_v0_9_2_fe_set_b32_mod rustsecp256k1_v0_9_2_fe_impl_set_b32_mod -# define rustsecp256k1_v0_9_2_fe_set_b32_limit rustsecp256k1_v0_9_2_fe_impl_set_b32_limit -# define rustsecp256k1_v0_9_2_fe_get_b32 rustsecp256k1_v0_9_2_fe_impl_get_b32 -# define rustsecp256k1_v0_9_2_fe_negate_unchecked rustsecp256k1_v0_9_2_fe_impl_negate_unchecked -# define rustsecp256k1_v0_9_2_fe_mul_int_unchecked rustsecp256k1_v0_9_2_fe_impl_mul_int_unchecked -# define rustsecp256k1_v0_9_2_fe_add rustsecp256k1_v0_9_2_fe_impl_add -# define rustsecp256k1_v0_9_2_fe_mul rustsecp256k1_v0_9_2_fe_impl_mul -# define rustsecp256k1_v0_9_2_fe_sqr rustsecp256k1_v0_9_2_fe_impl_sqr -# define rustsecp256k1_v0_9_2_fe_cmov rustsecp256k1_v0_9_2_fe_impl_cmov -# define rustsecp256k1_v0_9_2_fe_to_storage rustsecp256k1_v0_9_2_fe_impl_to_storage -# define rustsecp256k1_v0_9_2_fe_from_storage rustsecp256k1_v0_9_2_fe_impl_from_storage -# define rustsecp256k1_v0_9_2_fe_inv rustsecp256k1_v0_9_2_fe_impl_inv -# define rustsecp256k1_v0_9_2_fe_inv_var rustsecp256k1_v0_9_2_fe_impl_inv_var -# define rustsecp256k1_v0_9_2_fe_get_bounds rustsecp256k1_v0_9_2_fe_impl_get_bounds -# define rustsecp256k1_v0_9_2_fe_half rustsecp256k1_v0_9_2_fe_impl_half -# define rustsecp256k1_v0_9_2_fe_add_int rustsecp256k1_v0_9_2_fe_impl_add_int -# define rustsecp256k1_v0_9_2_fe_is_square_var rustsecp256k1_v0_9_2_fe_impl_is_square_var +# define rustsecp256k1_v0_10_0_fe_normalize rustsecp256k1_v0_10_0_fe_impl_normalize +# define rustsecp256k1_v0_10_0_fe_normalize_weak rustsecp256k1_v0_10_0_fe_impl_normalize_weak +# define rustsecp256k1_v0_10_0_fe_normalize_var 
rustsecp256k1_v0_10_0_fe_impl_normalize_var +# define rustsecp256k1_v0_10_0_fe_normalizes_to_zero rustsecp256k1_v0_10_0_fe_impl_normalizes_to_zero +# define rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var rustsecp256k1_v0_10_0_fe_impl_normalizes_to_zero_var +# define rustsecp256k1_v0_10_0_fe_set_int rustsecp256k1_v0_10_0_fe_impl_set_int +# define rustsecp256k1_v0_10_0_fe_clear rustsecp256k1_v0_10_0_fe_impl_clear +# define rustsecp256k1_v0_10_0_fe_is_zero rustsecp256k1_v0_10_0_fe_impl_is_zero +# define rustsecp256k1_v0_10_0_fe_is_odd rustsecp256k1_v0_10_0_fe_impl_is_odd +# define rustsecp256k1_v0_10_0_fe_cmp_var rustsecp256k1_v0_10_0_fe_impl_cmp_var +# define rustsecp256k1_v0_10_0_fe_set_b32_mod rustsecp256k1_v0_10_0_fe_impl_set_b32_mod +# define rustsecp256k1_v0_10_0_fe_set_b32_limit rustsecp256k1_v0_10_0_fe_impl_set_b32_limit +# define rustsecp256k1_v0_10_0_fe_get_b32 rustsecp256k1_v0_10_0_fe_impl_get_b32 +# define rustsecp256k1_v0_10_0_fe_negate_unchecked rustsecp256k1_v0_10_0_fe_impl_negate_unchecked +# define rustsecp256k1_v0_10_0_fe_mul_int_unchecked rustsecp256k1_v0_10_0_fe_impl_mul_int_unchecked +# define rustsecp256k1_v0_10_0_fe_add rustsecp256k1_v0_10_0_fe_impl_add +# define rustsecp256k1_v0_10_0_fe_mul rustsecp256k1_v0_10_0_fe_impl_mul +# define rustsecp256k1_v0_10_0_fe_sqr rustsecp256k1_v0_10_0_fe_impl_sqr +# define rustsecp256k1_v0_10_0_fe_cmov rustsecp256k1_v0_10_0_fe_impl_cmov +# define rustsecp256k1_v0_10_0_fe_to_storage rustsecp256k1_v0_10_0_fe_impl_to_storage +# define rustsecp256k1_v0_10_0_fe_from_storage rustsecp256k1_v0_10_0_fe_impl_from_storage +# define rustsecp256k1_v0_10_0_fe_inv rustsecp256k1_v0_10_0_fe_impl_inv +# define rustsecp256k1_v0_10_0_fe_inv_var rustsecp256k1_v0_10_0_fe_impl_inv_var +# define rustsecp256k1_v0_10_0_fe_get_bounds rustsecp256k1_v0_10_0_fe_impl_get_bounds +# define rustsecp256k1_v0_10_0_fe_half rustsecp256k1_v0_10_0_fe_impl_half +# define rustsecp256k1_v0_10_0_fe_add_int rustsecp256k1_v0_10_0_fe_impl_add_int +# define rustsecp256k1_v0_10_0_fe_is_square_var rustsecp256k1_v0_10_0_fe_impl_is_square_var #endif /* !defined(VERIFY) */ /** Normalize a field element. @@ -109,64 +109,64 @@ static const rustsecp256k1_v0_9_2_fe rustsecp256k1_v0_9_2_const_beta = SECP256K1 * On input, r must be a valid field element. * On output, r represents the same value but has normalized=1 and magnitude=1. */ -static void rustsecp256k1_v0_9_2_fe_normalize(rustsecp256k1_v0_9_2_fe *r); +static void rustsecp256k1_v0_10_0_fe_normalize(rustsecp256k1_v0_10_0_fe *r); /** Give a field element magnitude 1. * * On input, r must be a valid field element. * On output, r represents the same value but has magnitude=1. Normalized is unchanged. */ -static void rustsecp256k1_v0_9_2_fe_normalize_weak(rustsecp256k1_v0_9_2_fe *r); +static void rustsecp256k1_v0_10_0_fe_normalize_weak(rustsecp256k1_v0_10_0_fe *r); /** Normalize a field element, without constant-time guarantee. * - * Identical in behavior to rustsecp256k1_v0_9_2_fe_normalize, but not constant time in r. + * Identical in behavior to rustsecp256k1_v0_10_0_fe_normalize, but not constant time in r. */ -static void rustsecp256k1_v0_9_2_fe_normalize_var(rustsecp256k1_v0_9_2_fe *r); +static void rustsecp256k1_v0_10_0_fe_normalize_var(rustsecp256k1_v0_10_0_fe *r); /** Determine whether r represents field element 0. * * On input, r must be a valid field element. * Returns whether r = 0 (mod p). 
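The `#define` block above is the non-VERIFY half of a two-build pattern: release builds alias each `fe_` name straight to its `_impl` backend, while VERIFY builds route through wrapper functions that check the magnitude/normalized invariants at run time. The shape of the pattern, with hypothetical names:

```c
#include <assert.h>

/* Backend implementation (always present). */
static int widget_impl_is_zero(int w) { return w == 0; }

#ifdef VERIFY
/* In VERIFY builds, the public name is a wrapper that checks invariants. */
static int widget_is_zero(int w) {
    assert(w >= 0);  /* run-time property check */
    return widget_impl_is_zero(w);
}
#else
/* In release builds, alias the public name to the backend: zero overhead. */
#define widget_is_zero widget_impl_is_zero
#endif

int main(void) { return widget_is_zero(0) ? 0 : 1; }
```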
*/ -static int rustsecp256k1_v0_9_2_fe_normalizes_to_zero(const rustsecp256k1_v0_9_2_fe *r); +static int rustsecp256k1_v0_10_0_fe_normalizes_to_zero(const rustsecp256k1_v0_10_0_fe *r); /** Determine whether r represents field element 0, without constant-time guarantee. * - * Identical in behavior to rustsecp256k1_v0_9_2_normalizes_to_zero, but not constant time in r. + * Identical in behavior to rustsecp256k1_v0_10_0_normalizes_to_zero, but not constant time in r. */ -static int rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(const rustsecp256k1_v0_9_2_fe *r); +static int rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(const rustsecp256k1_v0_10_0_fe *r); /** Set a field element to an integer in range [0,0x7FFF]. * * On input, r does not need to be initialized, a must be in [0,0x7FFF]. * On output, r represents value a, is normalized and has magnitude (a!=0). */ -static void rustsecp256k1_v0_9_2_fe_set_int(rustsecp256k1_v0_9_2_fe *r, int a); +static void rustsecp256k1_v0_10_0_fe_set_int(rustsecp256k1_v0_10_0_fe *r, int a); /** Set a field element to 0. * * On input, a does not need to be initialized. * On output, a represents 0, is normalized and has magnitude 0. */ -static void rustsecp256k1_v0_9_2_fe_clear(rustsecp256k1_v0_9_2_fe *a); +static void rustsecp256k1_v0_10_0_fe_clear(rustsecp256k1_v0_10_0_fe *a); /** Determine whether a represents field element 0. * * On input, a must be a valid normalized field element. * Returns whether a = 0 (mod p). * - * This behaves identical to rustsecp256k1_v0_9_2_normalizes_to_zero{,_var}, but requires + * This behaves identical to rustsecp256k1_v0_10_0_normalizes_to_zero{,_var}, but requires * normalized input (and is much faster). */ -static int rustsecp256k1_v0_9_2_fe_is_zero(const rustsecp256k1_v0_9_2_fe *a); +static int rustsecp256k1_v0_10_0_fe_is_zero(const rustsecp256k1_v0_10_0_fe *a); /** Determine whether a (mod p) is odd. * * On input, a must be a valid normalized field element. * Returns (int(a) mod p) & 1. */ -static int rustsecp256k1_v0_9_2_fe_is_odd(const rustsecp256k1_v0_9_2_fe *a); +static int rustsecp256k1_v0_10_0_fe_is_odd(const rustsecp256k1_v0_10_0_fe *a); /** Determine whether two field elements are equal. * @@ -174,7 +174,7 @@ static int rustsecp256k1_v0_9_2_fe_is_odd(const rustsecp256k1_v0_9_2_fe *a); * 1 and 31, respectively. * Returns a = b (mod p). */ -static int rustsecp256k1_v0_9_2_fe_equal(const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe *b); +static int rustsecp256k1_v0_10_0_fe_equal(const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe *b); /** Compare the values represented by 2 field elements, without constant-time guarantee. * @@ -182,14 +182,15 @@ static int rustsecp256k1_v0_9_2_fe_equal(const rustsecp256k1_v0_9_2_fe *a, const * Returns 1 if a > b, -1 if a < b, and 0 if a = b (comparisons are done as integers * in range 0..p-1). */ -static int rustsecp256k1_v0_9_2_fe_cmp_var(const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe *b); +static int rustsecp256k1_v0_10_0_fe_cmp_var(const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe *b); -/** Set a field element equal to a provided 32-byte big endian value, reducing it. +/** Set a field element equal to the element represented by a provided 32-byte big endian value + * interpreted modulo p. * * On input, r does not need to be initialized. a must be a pointer to an initialized 32-byte array. * On output, r = a (mod p). It will have magnitude 1, and not be normalized. 
*/ -static void rustsecp256k1_v0_9_2_fe_set_b32_mod(rustsecp256k1_v0_9_2_fe *r, const unsigned char *a); +static void rustsecp256k1_v0_10_0_fe_set_b32_mod(rustsecp256k1_v0_10_0_fe *r, const unsigned char *a); /** Set a field element equal to a provided 32-byte big endian value, checking for overflow. * @@ -197,13 +198,13 @@ static void rustsecp256k1_v0_9_2_fe_set_b32_mod(rustsecp256k1_v0_9_2_fe *r, cons * On output, r = a if (a < p), it will be normalized with magnitude 1, and 1 is returned. * If a >= p, 0 is returned, and r will be made invalid (and must not be used without overwriting). */ -static int rustsecp256k1_v0_9_2_fe_set_b32_limit(rustsecp256k1_v0_9_2_fe *r, const unsigned char *a); +static int rustsecp256k1_v0_10_0_fe_set_b32_limit(rustsecp256k1_v0_10_0_fe *r, const unsigned char *a); /** Convert a field element to 32-byte big endian byte array. * On input, a must be a valid normalized field element, and r a pointer to a 32-byte array. * On output, r = a (mod p). */ -static void rustsecp256k1_v0_9_2_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_9_2_fe *a); +static void rustsecp256k1_v0_10_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_10_0_fe *a); /** Negate a field element. * @@ -212,20 +213,20 @@ static void rustsecp256k1_v0_9_2_fe_get_b32(unsigned char *r, const rustsecp256k * Performs {r = -a}. * On output, r will not be normalized, and will have magnitude m+1. */ -#define rustsecp256k1_v0_9_2_fe_negate(r, a, m) ASSERT_INT_CONST_AND_DO(m, rustsecp256k1_v0_9_2_fe_negate_unchecked(r, a, m)) +#define rustsecp256k1_v0_10_0_fe_negate(r, a, m) ASSERT_INT_CONST_AND_DO(m, rustsecp256k1_v0_10_0_fe_negate_unchecked(r, a, m)) -/** Like rustsecp256k1_v0_9_2_fe_negate_unchecked but m is not checked to be an integer constant expression. +/** Like rustsecp256k1_v0_10_0_fe_negate_unchecked but m is not checked to be an integer constant expression. * * Should not be called directly outside of tests. */ -static void rustsecp256k1_v0_9_2_fe_negate_unchecked(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, int m); +static void rustsecp256k1_v0_10_0_fe_negate_unchecked(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, int m); /** Add a small integer to a field element. * * Performs {r += a}. The magnitude of r increases by 1, and normalized is cleared. * a must be in range [0,0x7FFF]. */ -static void rustsecp256k1_v0_9_2_fe_add_int(rustsecp256k1_v0_9_2_fe *r, int a); +static void rustsecp256k1_v0_10_0_fe_add_int(rustsecp256k1_v0_10_0_fe *r, int a); /** Multiply a field element with a small integer. * @@ -234,13 +235,13 @@ static void rustsecp256k1_v0_9_2_fe_add_int(rustsecp256k1_v0_9_2_fe *r, int a); * Performs {r *= a}. * On output, r's magnitude is multiplied by a, and r will not be normalized. */ -#define rustsecp256k1_v0_9_2_fe_mul_int(r, a) ASSERT_INT_CONST_AND_DO(a, rustsecp256k1_v0_9_2_fe_mul_int_unchecked(r, a)) +#define rustsecp256k1_v0_10_0_fe_mul_int(r, a) ASSERT_INT_CONST_AND_DO(a, rustsecp256k1_v0_10_0_fe_mul_int_unchecked(r, a)) -/** Like rustsecp256k1_v0_9_2_fe_mul_int but a is not checked to be an integer constant expression. +/** Like rustsecp256k1_v0_10_0_fe_mul_int but a is not checked to be an integer constant expression. * * Should not be called directly outside of tests. */ -static void rustsecp256k1_v0_9_2_fe_mul_int_unchecked(rustsecp256k1_v0_9_2_fe *r, int a); +static void rustsecp256k1_v0_10_0_fe_mul_int_unchecked(rustsecp256k1_v0_10_0_fe *r, int a); /** Increment a field element by another. 
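The magnitude accounting in these declarations composes mechanically: each operation's comment states its effect on the bound, so a caller can track the worst case by hand. A short sketch under the same assumptions as the previous one (internal API, hypothetical demo function; fe_add is declared just below):

    /* Sketch: tracking magnitude through negate, mul_int, and add. */
    static void demo_magnitude_tracking(void) {
        rustsecp256k1_v0_10_0_fe x, y;
        rustsecp256k1_v0_10_0_fe_set_int(&x, 7);      /* magnitude 1, normalized     */
        rustsecp256k1_v0_10_0_fe_negate(&y, &x, 1);   /* magnitude 2, not normalized */
        rustsecp256k1_v0_10_0_fe_mul_int(&y, 3);      /* magnitude 6 (2 * 3)         */
        rustsecp256k1_v0_10_0_fe_add(&y, &x);         /* magnitude 7 (6 + 1)         */
        rustsecp256k1_v0_10_0_fe_normalize(&y);       /* magnitude 1, normalized     */
    }

Note that both fe_negate and fe_mul_int are macros that insist on an integer constant expression, which is what lets the per-limb bound constants fold at compile time.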
* @@ -249,7 +250,7 @@ static void rustsecp256k1_v0_9_2_fe_mul_int_unchecked(rustsecp256k1_v0_9_2_fe *r * Performs {r += a}. * On output, r will not be normalized, and will have magnitude incremented by a's. */ -static void rustsecp256k1_v0_9_2_fe_add(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a); +static void rustsecp256k1_v0_10_0_fe_add(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a); /** Multiply two field elements. * @@ -259,7 +260,7 @@ static void rustsecp256k1_v0_9_2_fe_add(rustsecp256k1_v0_9_2_fe *r, const rustse * Performs {r = a * b} * On output, r will have magnitude 1, but won't be normalized. */ -static void rustsecp256k1_v0_9_2_fe_mul(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe * SECP256K1_RESTRICT b); +static void rustsecp256k1_v0_10_0_fe_mul(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe * SECP256K1_RESTRICT b); /** Square a field element. * @@ -268,7 +269,7 @@ static void rustsecp256k1_v0_9_2_fe_mul(rustsecp256k1_v0_9_2_fe *r, const rustse * Performs {r = a**2} * On output, r will have magnitude 1, but won't be normalized. */ -static void rustsecp256k1_v0_9_2_fe_sqr(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a); +static void rustsecp256k1_v0_10_0_fe_sqr(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a); /** Compute a square root of a field element. * @@ -279,7 +280,7 @@ static void rustsecp256k1_v0_9_2_fe_sqr(rustsecp256k1_v0_9_2_fe *r, const rustse * Variables r and a must not point to the same object. * On output, r will have magnitude 1 but will not be normalized. */ -static int rustsecp256k1_v0_9_2_fe_sqrt(rustsecp256k1_v0_9_2_fe * SECP256K1_RESTRICT r, const rustsecp256k1_v0_9_2_fe * SECP256K1_RESTRICT a); +static int rustsecp256k1_v0_10_0_fe_sqrt(rustsecp256k1_v0_10_0_fe * SECP256K1_RESTRICT r, const rustsecp256k1_v0_10_0_fe * SECP256K1_RESTRICT a); /** Compute the modular inverse of a field element. * @@ -288,31 +289,31 @@ static int rustsecp256k1_v0_9_2_fe_sqrt(rustsecp256k1_v0_9_2_fe * SECP256K1_REST * inverse). * On output, r will have magnitude (a.magnitude != 0) and be normalized. */ -static void rustsecp256k1_v0_9_2_fe_inv(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a); +static void rustsecp256k1_v0_10_0_fe_inv(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a); /** Compute the modular inverse of a field element, without constant-time guarantee. * - * Behaves identically to rustsecp256k1_v0_9_2_fe_inv, but is not constant-time in a. + * Behaves identically to rustsecp256k1_v0_10_0_fe_inv, but is not constant-time in a. */ -static void rustsecp256k1_v0_9_2_fe_inv_var(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a); +static void rustsecp256k1_v0_10_0_fe_inv_var(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a); -/** Convert a field element to rustsecp256k1_v0_9_2_fe_storage. +/** Convert a field element to rustsecp256k1_v0_10_0_fe_storage. * * On input, a must be a valid normalized field element. * Performs {r = a}. */ -static void rustsecp256k1_v0_9_2_fe_to_storage(rustsecp256k1_v0_9_2_fe_storage *r, const rustsecp256k1_v0_9_2_fe *a); +static void rustsecp256k1_v0_10_0_fe_to_storage(rustsecp256k1_v0_10_0_fe_storage *r, const rustsecp256k1_v0_10_0_fe *a); -/** Convert a field element back from rustsecp256k1_v0_9_2_fe_storage. +/** Convert a field element back from rustsecp256k1_v0_10_0_fe_storage. * * On input, r need not be initialized. 
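Per the comments above, fe_mul and fe_sqr return magnitude-1 but unnormalized results, while fe_inv returns a normalized one, so a round-trip check needs exactly one explicit normalize before the comparison. A sketch under the same internal-API assumptions:

    /* Sketch: a * inv(a) normalizes to 1. */
    static void demo_field_inverse(void) {
        rustsecp256k1_v0_10_0_fe a, ai, prod, one;
        rustsecp256k1_v0_10_0_fe_set_int(&a, 42);
        rustsecp256k1_v0_10_0_fe_inv(&ai, &a);
        rustsecp256k1_v0_10_0_fe_mul(&prod, &a, &ai);  /* magnitude 1, not normalized */
        rustsecp256k1_v0_10_0_fe_normalize(&prod);
        rustsecp256k1_v0_10_0_fe_set_int(&one, 1);
        VERIFY_CHECK(rustsecp256k1_v0_10_0_fe_equal(&prod, &one));
    }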
* Performs {r = a}. * On output, r will be normalized and will have magnitude 1. */ -static void rustsecp256k1_v0_9_2_fe_from_storage(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe_storage *a); +static void rustsecp256k1_v0_10_0_fe_from_storage(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe_storage *a); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/ -static void rustsecp256k1_v0_9_2_fe_storage_cmov(rustsecp256k1_v0_9_2_fe_storage *r, const rustsecp256k1_v0_9_2_fe_storage *a, int flag); +static void rustsecp256k1_v0_10_0_fe_storage_cmov(rustsecp256k1_v0_10_0_fe_storage *r, const rustsecp256k1_v0_10_0_fe_storage *a, int flag); /** Conditionally move a field element in constant time. * @@ -322,7 +323,7 @@ static void rustsecp256k1_v0_9_2_fe_storage_cmov(rustsecp256k1_v0_9_2_fe_storage * On output, r's magnitude will be the maximum of both input magnitudes. * It will be normalized if and only if both inputs were normalized. */ -static void rustsecp256k1_v0_9_2_fe_cmov(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, int flag); +static void rustsecp256k1_v0_10_0_fe_cmov(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, int flag); /** Halve the value of a field element modulo the field prime in constant-time. * @@ -330,23 +331,25 @@ static void rustsecp256k1_v0_9_2_fe_cmov(rustsecp256k1_v0_9_2_fe *r, const rusts * On output, r will be normalized and have magnitude floor(m/2) + 1 where m is * the magnitude of r on input. */ -static void rustsecp256k1_v0_9_2_fe_half(rustsecp256k1_v0_9_2_fe *r); +static void rustsecp256k1_v0_10_0_fe_half(rustsecp256k1_v0_10_0_fe *r); /** Sets r to a field element with magnitude m, normalized if (and only if) m==0. * The value is chosen so that it is likely to trigger edge cases related to * internal overflows. */ -static void rustsecp256k1_v0_9_2_fe_get_bounds(rustsecp256k1_v0_9_2_fe *r, int m); +static void rustsecp256k1_v0_10_0_fe_get_bounds(rustsecp256k1_v0_10_0_fe *r, int m); /** Determine whether a is a square (modulo p). * * On input, a must be a valid field element. */ -static int rustsecp256k1_v0_9_2_fe_is_square_var(const rustsecp256k1_v0_9_2_fe *a); +static int rustsecp256k1_v0_10_0_fe_is_square_var(const rustsecp256k1_v0_10_0_fe *a); /** Check invariants on a field element (no-op unless VERIFY is enabled). */ -static void rustsecp256k1_v0_9_2_fe_verify(const rustsecp256k1_v0_9_2_fe *a); +static void rustsecp256k1_v0_10_0_fe_verify(const rustsecp256k1_v0_10_0_fe *a); +#define SECP256K1_FE_VERIFY(a) rustsecp256k1_v0_10_0_fe_verify(a) /** Check that magnitude of a is at most m (no-op unless VERIFY is enabled). */ -static void rustsecp256k1_v0_9_2_fe_verify_magnitude(const rustsecp256k1_v0_9_2_fe *a, int m); +static void rustsecp256k1_v0_10_0_fe_verify_magnitude(const rustsecp256k1_v0_10_0_fe *a, int m); +#define SECP256K1_FE_VERIFY_MAGNITUDE(a, m) rustsecp256k1_v0_10_0_fe_verify_magnitude(a, m) #endif /* SECP256K1_FIELD_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/field_10x26.h b/secp256k1-sys/depend/secp256k1/src/field_10x26.h index c4370e1a8..9029e3a84 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_10x26.h +++ b/secp256k1-sys/depend/secp256k1/src/field_10x26.h @@ -31,7 +31,7 @@ typedef struct { * (together these imply n[9] <= 2^22 - 1) */ SECP256K1_FE_VERIFY_FIELDS -} rustsecp256k1_v0_9_2_fe; +} rustsecp256k1_v0_10_0_fe; /* Unpacks a constant into a overlapping multi-limbed FE element. 
*/ #define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \ @@ -49,7 +49,7 @@ typedef struct { typedef struct { uint32_t n[8]; -} rustsecp256k1_v0_9_2_fe_storage; +} rustsecp256k1_v0_10_0_fe_storage; #define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }} #define SECP256K1_FE_STORAGE_CONST_GET(d) d.n[7], d.n[6], d.n[5], d.n[4],d.n[3], d.n[2], d.n[1], d.n[0] diff --git a/secp256k1-sys/depend/secp256k1/src/field_10x26_impl.h b/secp256k1-sys/depend/secp256k1/src/field_10x26_impl.h index 42b9a78b0..a925b5e51 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_10x26_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_10x26_impl.h @@ -13,7 +13,7 @@ #include "modinv32_impl.h" #ifdef VERIFY -static void rustsecp256k1_v0_9_2_fe_impl_verify(const rustsecp256k1_v0_9_2_fe *a) { +static void rustsecp256k1_v0_10_0_fe_impl_verify(const rustsecp256k1_v0_10_0_fe *a) { const uint32_t *d = a->n; int m = a->normalized ? 1 : 2 * a->magnitude; VERIFY_CHECK(d[0] <= 0x3FFFFFFUL * m); @@ -37,7 +37,7 @@ static void rustsecp256k1_v0_9_2_fe_impl_verify(const rustsecp256k1_v0_9_2_fe *a } #endif -static void rustsecp256k1_v0_9_2_fe_impl_get_bounds(rustsecp256k1_v0_9_2_fe *r, int m) { +static void rustsecp256k1_v0_10_0_fe_impl_get_bounds(rustsecp256k1_v0_10_0_fe *r, int m) { r->n[0] = 0x3FFFFFFUL * 2 * m; r->n[1] = 0x3FFFFFFUL * 2 * m; r->n[2] = 0x3FFFFFFUL * 2 * m; @@ -50,7 +50,7 @@ static void rustsecp256k1_v0_9_2_fe_impl_get_bounds(rustsecp256k1_v0_9_2_fe *r, r->n[9] = 0x03FFFFFUL * 2 * m; } -static void rustsecp256k1_v0_9_2_fe_impl_normalize(rustsecp256k1_v0_9_2_fe *r) { +static void rustsecp256k1_v0_10_0_fe_impl_normalize(rustsecp256k1_v0_10_0_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -99,7 +99,7 @@ static void rustsecp256k1_v0_9_2_fe_impl_normalize(rustsecp256k1_v0_9_2_fe *r) { r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9; } -static void rustsecp256k1_v0_9_2_fe_impl_normalize_weak(rustsecp256k1_v0_9_2_fe *r) { +static void rustsecp256k1_v0_10_0_fe_impl_normalize_weak(rustsecp256k1_v0_10_0_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -125,7 +125,7 @@ static void rustsecp256k1_v0_9_2_fe_impl_normalize_weak(rustsecp256k1_v0_9_2_fe r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9; } -static void rustsecp256k1_v0_9_2_fe_impl_normalize_var(rustsecp256k1_v0_9_2_fe *r) { +static void rustsecp256k1_v0_10_0_fe_impl_normalize_var(rustsecp256k1_v0_10_0_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -175,7 +175,7 @@ static void rustsecp256k1_v0_9_2_fe_impl_normalize_var(rustsecp256k1_v0_9_2_fe * r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9; } -static int rustsecp256k1_v0_9_2_fe_impl_normalizes_to_zero(const rustsecp256k1_v0_9_2_fe *r) { +static int rustsecp256k1_v0_10_0_fe_impl_normalizes_to_zero(const rustsecp256k1_v0_10_0_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -204,7 +204,7 @@ static int rustsecp256k1_v0_9_2_fe_impl_normalizes_to_zero(const rustsecp256k1_v return (z0 == 0) | (z1 == 0x3FFFFFFUL); } -static int 
rustsecp256k1_v0_9_2_fe_impl_normalizes_to_zero_var(const rustsecp256k1_v0_9_2_fe *r) { +static int rustsecp256k1_v0_10_0_fe_impl_normalizes_to_zero_var(const rustsecp256k1_v0_10_0_fe *r) { uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; uint32_t z0, z1; uint32_t x; @@ -256,28 +256,28 @@ static int rustsecp256k1_v0_9_2_fe_impl_normalizes_to_zero_var(const rustsecp256 return (z0 == 0) | (z1 == 0x3FFFFFFUL); } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_set_int(rustsecp256k1_v0_9_2_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_set_int(rustsecp256k1_v0_10_0_fe *r, int a) { r->n[0] = a; r->n[1] = r->n[2] = r->n[3] = r->n[4] = r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0; } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_fe_impl_is_zero(const rustsecp256k1_v0_9_2_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_fe_impl_is_zero(const rustsecp256k1_v0_10_0_fe *a) { const uint32_t *t = a->n; return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0; } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_fe_impl_is_odd(const rustsecp256k1_v0_9_2_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_fe_impl_is_odd(const rustsecp256k1_v0_10_0_fe *a) { return a->n[0] & 1; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_clear(rustsecp256k1_v0_9_2_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_clear(rustsecp256k1_v0_10_0_fe *a) { int i; for (i=0; i<10; i++) { a->n[i] = 0; } } -static int rustsecp256k1_v0_9_2_fe_impl_cmp_var(const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe *b) { +static int rustsecp256k1_v0_10_0_fe_impl_cmp_var(const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe *b) { int i; for (i = 9; i >= 0; i--) { if (a->n[i] > b->n[i]) { @@ -290,7 +290,7 @@ static int rustsecp256k1_v0_9_2_fe_impl_cmp_var(const rustsecp256k1_v0_9_2_fe *a return 0; } -static void rustsecp256k1_v0_9_2_fe_impl_set_b32_mod(rustsecp256k1_v0_9_2_fe *r, const unsigned char *a) { +static void rustsecp256k1_v0_10_0_fe_impl_set_b32_mod(rustsecp256k1_v0_10_0_fe *r, const unsigned char *a) { r->n[0] = (uint32_t)a[31] | ((uint32_t)a[30] << 8) | ((uint32_t)a[29] << 16) | ((uint32_t)(a[28] & 0x3) << 24); r->n[1] = (uint32_t)((a[28] >> 2) & 0x3f) | ((uint32_t)a[27] << 6) | ((uint32_t)a[26] << 14) | ((uint32_t)(a[25] & 0xf) << 22); r->n[2] = (uint32_t)((a[25] >> 4) & 0xf) | ((uint32_t)a[24] << 4) | ((uint32_t)a[23] << 12) | ((uint32_t)(a[22] & 0x3f) << 20); @@ -303,13 +303,13 @@ static void rustsecp256k1_v0_9_2_fe_impl_set_b32_mod(rustsecp256k1_v0_9_2_fe *r, r->n[9] = (uint32_t)((a[2] >> 2) & 0x3f) | ((uint32_t)a[1] << 6) | ((uint32_t)a[0] << 14); } -static int rustsecp256k1_v0_9_2_fe_impl_set_b32_limit(rustsecp256k1_v0_9_2_fe *r, const unsigned char *a) { - rustsecp256k1_v0_9_2_fe_impl_set_b32_mod(r, a); +static int rustsecp256k1_v0_10_0_fe_impl_set_b32_limit(rustsecp256k1_v0_10_0_fe *r, const unsigned char *a) { + rustsecp256k1_v0_10_0_fe_impl_set_b32_mod(r, a); return !((r->n[9] == 0x3FFFFFUL) & ((r->n[8] & r->n[7] & r->n[6] & r->n[5] & r->n[4] & r->n[3] & r->n[2]) == 0x3FFFFFFUL) & ((r->n[1] + 0x40UL + ((r->n[0] + 0x3D1UL) >> 26)) > 0x3FFFFFFUL)); } /** Convert a field element to a 32-byte big endian value. 
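The return expression of the 10x26 set_b32_limit above packs the "value >= p" test into one carry chain. Every 26-bit limb of p above the bottom two is maximal (0x3FFFFFF, with a 22-bit top limb of 0x3FFFFF), and the bottom two limbs are 0x3FFFFBF and 0x3FFFC2F, so after limbs 2..9 have been checked for all-ones the test reduces to:

    carry = (n[0] + 0x3D1) >> 26            carry = 1  iff  n[0] >= 0x3FFFC2F = p mod 2^26
    overflow  iff  n[1] + 0x40 + carry > 0x3FFFFFF
              iff  (n[1], n[0]) >= (0x3FFFFBF, 0x3FFFC2F) lexicographically

since 0x3FFFC2F + 0x3D1 = 2^26 and 0x3FFFFBF + 0x40 = 0x3FFFFFF exactly: with the carry, n[1] >= 0x3FFFFBF suffices; without it, n[1] must strictly exceed p's second limb.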
Requires the input to be normalized */ -static void rustsecp256k1_v0_9_2_fe_impl_get_b32(unsigned char *r, const rustsecp256k1_v0_9_2_fe *a) { +static void rustsecp256k1_v0_10_0_fe_impl_get_b32(unsigned char *r, const rustsecp256k1_v0_10_0_fe *a) { r[0] = (a->n[9] >> 14) & 0xff; r[1] = (a->n[9] >> 6) & 0xff; r[2] = ((a->n[9] & 0x3F) << 2) | ((a->n[8] >> 24) & 0x3); @@ -344,7 +344,7 @@ static void rustsecp256k1_v0_9_2_fe_impl_get_b32(unsigned char *r, const rustsec r[31] = a->n[0] & 0xff; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_negate_unchecked(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, int m) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_negate_unchecked(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, int m) { /* For all legal values of m (0..31), the following properties hold: */ VERIFY_CHECK(0x3FFFC2FUL * 2 * (m + 1) >= 0x3FFFFFFUL * 2 * m); VERIFY_CHECK(0x3FFFFBFUL * 2 * (m + 1) >= 0x3FFFFFFUL * 2 * m); @@ -365,7 +365,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_negate_unchecked(rusts r->n[9] = 0x03FFFFFUL * 2 * (m + 1) - a->n[9]; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_mul_int_unchecked(rustsecp256k1_v0_9_2_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_mul_int_unchecked(rustsecp256k1_v0_10_0_fe *r, int a) { r->n[0] *= a; r->n[1] *= a; r->n[2] *= a; @@ -378,7 +378,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_mul_int_unchecked(rust r->n[9] *= a; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_add(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_add(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a) { r->n[0] += a->n[0]; r->n[1] += a->n[1]; r->n[2] += a->n[2]; @@ -391,25 +391,21 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_add(rustsecp256k1_v0_9 r->n[9] += a->n[9]; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_add_int(rustsecp256k1_v0_9_2_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_add_int(rustsecp256k1_v0_10_0_fe *r, int a) { r->n[0] += a; } #if defined(USE_EXTERNAL_ASM) /* External assembler implementation */ -void rustsecp256k1_v0_9_2_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b); -void rustsecp256k1_v0_9_2_fe_sqr_inner(uint32_t *r, const uint32_t *a); +void rustsecp256k1_v0_10_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b); +void rustsecp256k1_v0_10_0_fe_sqr_inner(uint32_t *r, const uint32_t *a); #else -#ifdef VERIFY #define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0) -#else -#define VERIFY_BITS(x, n) do { } while(0) -#endif -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) { uint64_t c, d; uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8; uint32_t t9, t1, t0, t2, t3, t4, t5, t6, t7; @@ -739,7 +735,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_mul_inner(uint32_t *r, cons /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_sqr_inner(uint32_t *r, const uint32_t *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_sqr_inner(uint32_t *r, const 
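The magic constants in the negate_unchecked body above are just the 26-bit limbs of p scaled by 2*(m+1): subtracting each a->n[i] from a per-limb encoding of a multiple of p keeps every limb nonnegative while computing -a (mod p), which is exactly why the result carries magnitude m+1. The limb values check out as a worked equation:

    p = 2^256 - 2^32 - 977,   977 = 0x3D1,   2^32 = 64 * 2^26
    p mod 2^26           = 2^26 - 0x3D1 = 0x3FFFC2F
    (p >> 26) mod 2^26   = 2^26 - 0x41  = 0x3FFFFBF    (64 from 2^32, plus one borrow)
    limbs 2..8           = 0x3FFFFFF,   top limb = 0x03FFFFF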
uint32_t *a) { uint64_t c, d; uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8; uint32_t t9, t0, t1, t2, t3, t4, t5, t6, t7; @@ -1014,15 +1010,15 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_sqr_inner(uint32_t *r, cons } #endif -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_mul(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe * SECP256K1_RESTRICT b) { - rustsecp256k1_v0_9_2_fe_mul_inner(r->n, a->n, b->n); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_mul(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe * SECP256K1_RESTRICT b) { + rustsecp256k1_v0_10_0_fe_mul_inner(r->n, a->n, b->n); } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_sqr(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a) { - rustsecp256k1_v0_9_2_fe_sqr_inner(r->n, a->n); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_sqr(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a) { + rustsecp256k1_v0_10_0_fe_sqr_inner(r->n, a->n); } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_cmov(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, int flag) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_cmov(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, int flag) { uint32_t mask0, mask1; volatile int vflag = flag; SECP256K1_CHECKMEM_CHECK_VERIFY(r->n, sizeof(r->n)); @@ -1040,7 +1036,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_cmov(rustsecp256k1_v0_ r->n[9] = (r->n[9] & mask0) | (a->n[9] & mask1); } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_fe_impl_half(rustsecp256k1_v0_9_2_fe *r) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_fe_impl_half(rustsecp256k1_v0_10_0_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; uint32_t one = (uint32_t)1; @@ -1105,7 +1101,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_fe_impl_half(rustsecp256k1_v0_ */ } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_fe_storage_cmov(rustsecp256k1_v0_9_2_fe_storage *r, const rustsecp256k1_v0_9_2_fe_storage *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_fe_storage_cmov(rustsecp256k1_v0_10_0_fe_storage *r, const rustsecp256k1_v0_10_0_fe_storage *a, int flag) { uint32_t mask0, mask1; volatile int vflag = flag; SECP256K1_CHECKMEM_CHECK_VERIFY(r->n, sizeof(r->n)); @@ -1121,7 +1117,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_fe_storage_cmov(rustsecp256k1_ r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1); } -static void rustsecp256k1_v0_9_2_fe_impl_to_storage(rustsecp256k1_v0_9_2_fe_storage *r, const rustsecp256k1_v0_9_2_fe *a) { +static void rustsecp256k1_v0_10_0_fe_impl_to_storage(rustsecp256k1_v0_10_0_fe_storage *r, const rustsecp256k1_v0_10_0_fe *a) { r->n[0] = a->n[0] | a->n[1] << 26; r->n[1] = a->n[1] >> 6 | a->n[2] << 20; r->n[2] = a->n[2] >> 12 | a->n[3] << 14; @@ -1132,7 +1128,7 @@ static void rustsecp256k1_v0_9_2_fe_impl_to_storage(rustsecp256k1_v0_9_2_fe_stor r->n[7] = a->n[8] >> 16 | a->n[9] << 10; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_fe_impl_from_storage(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe_storage *a) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_fe_impl_from_storage(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe_storage *a) { r->n[0] = a->n[0] & 0x3FFFFFFUL; r->n[1] = a->n[0] >> 26 | ((a->n[1] << 6) & 0x3FFFFFFUL); 
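The cmov and storage_cmov bodies above all rely on the same branch-free mask construction. In isolation it looks like this (a sketch, not the vendored text; the library inlines it per limb):

    #include <stdint.h>

    /* Branch-free select: returns a when flag is 1, r when flag is 0. */
    static uint32_t cmov_u32(uint32_t r, uint32_t a, int flag) {
        volatile int vflag = flag;                /* discourage the compiler from branching  */
        uint32_t mask0 = vflag + ~((uint32_t)0);  /* flag=1 -> 0x00000000, flag=0 -> all 1s  */
        uint32_t mask1 = ~mask0;                  /* the complementary mask                  */
        return (r & mask0) | (a & mask1);
    }

Because no branch depends on flag, the execution time does not leak which input was selected.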
r->n[2] = a->n[1] >> 20 | ((a->n[2] << 12) & 0x3FFFFFFUL); @@ -1145,12 +1141,12 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_fe_impl_from_storage(rustsecp2 r->n[9] = a->n[7] >> 10; } -static void rustsecp256k1_v0_9_2_fe_from_signed30(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_modinv32_signed30 *a) { +static void rustsecp256k1_v0_10_0_fe_from_signed30(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_modinv32_signed30 *a) { const uint32_t M26 = UINT32_MAX >> 6; const uint32_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4], a5 = a->v[5], a6 = a->v[6], a7 = a->v[7], a8 = a->v[8]; - /* The output from rustsecp256k1_v0_9_2_modinv32{_var} should be normalized to range [0,modulus), and + /* The output from rustsecp256k1_v0_10_0_modinv32{_var} should be normalized to range [0,modulus), and * have limbs in [0,2^30). The modulus is < 2^256, so the top limb must be below 2^(256-30*8). */ VERIFY_CHECK(a0 >> 30 == 0); @@ -1175,7 +1171,7 @@ static void rustsecp256k1_v0_9_2_fe_from_signed30(rustsecp256k1_v0_9_2_fe *r, co r->n[9] = (a7 >> 24 | a8 << 6); } -static void rustsecp256k1_v0_9_2_fe_to_signed30(rustsecp256k1_v0_9_2_modinv32_signed30 *r, const rustsecp256k1_v0_9_2_fe *a) { +static void rustsecp256k1_v0_10_0_fe_to_signed30(rustsecp256k1_v0_10_0_modinv32_signed30 *r, const rustsecp256k1_v0_10_0_fe *a) { const uint32_t M30 = UINT32_MAX >> 2; const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4], a5 = a->n[5], a6 = a->n[6], a7 = a->n[7], a8 = a->n[8], a9 = a->n[9]; @@ -1192,48 +1188,48 @@ static void rustsecp256k1_v0_9_2_fe_to_signed30(rustsecp256k1_v0_9_2_modinv32_si r->v[8] = a9 >> 6; } -static const rustsecp256k1_v0_9_2_modinv32_modinfo rustsecp256k1_v0_9_2_const_modinfo_fe = { +static const rustsecp256k1_v0_10_0_modinv32_modinfo rustsecp256k1_v0_10_0_const_modinfo_fe = { {{-0x3D1, -4, 0, 0, 0, 0, 0, 0, 65536}}, 0x2DDACACFL }; -static void rustsecp256k1_v0_9_2_fe_impl_inv(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *x) { - rustsecp256k1_v0_9_2_fe tmp = *x; - rustsecp256k1_v0_9_2_modinv32_signed30 s; +static void rustsecp256k1_v0_10_0_fe_impl_inv(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *x) { + rustsecp256k1_v0_10_0_fe tmp = *x; + rustsecp256k1_v0_10_0_modinv32_signed30 s; - rustsecp256k1_v0_9_2_fe_normalize(&tmp); - rustsecp256k1_v0_9_2_fe_to_signed30(&s, &tmp); - rustsecp256k1_v0_9_2_modinv32(&s, &rustsecp256k1_v0_9_2_const_modinfo_fe); - rustsecp256k1_v0_9_2_fe_from_signed30(r, &s); + rustsecp256k1_v0_10_0_fe_normalize(&tmp); + rustsecp256k1_v0_10_0_fe_to_signed30(&s, &tmp); + rustsecp256k1_v0_10_0_modinv32(&s, &rustsecp256k1_v0_10_0_const_modinfo_fe); + rustsecp256k1_v0_10_0_fe_from_signed30(r, &s); } -static void rustsecp256k1_v0_9_2_fe_impl_inv_var(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *x) { - rustsecp256k1_v0_9_2_fe tmp = *x; - rustsecp256k1_v0_9_2_modinv32_signed30 s; +static void rustsecp256k1_v0_10_0_fe_impl_inv_var(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *x) { + rustsecp256k1_v0_10_0_fe tmp = *x; + rustsecp256k1_v0_10_0_modinv32_signed30 s; - rustsecp256k1_v0_9_2_fe_normalize_var(&tmp); - rustsecp256k1_v0_9_2_fe_to_signed30(&s, &tmp); - rustsecp256k1_v0_9_2_modinv32_var(&s, &rustsecp256k1_v0_9_2_const_modinfo_fe); - rustsecp256k1_v0_9_2_fe_from_signed30(r, &s); + rustsecp256k1_v0_10_0_fe_normalize_var(&tmp); + rustsecp256k1_v0_10_0_fe_to_signed30(&s, &tmp); + rustsecp256k1_v0_10_0_modinv32_var(&s, &rustsecp256k1_v0_10_0_const_modinfo_fe); 
+ rustsecp256k1_v0_10_0_fe_from_signed30(r, &s); } -static int rustsecp256k1_v0_9_2_fe_impl_is_square_var(const rustsecp256k1_v0_9_2_fe *x) { - rustsecp256k1_v0_9_2_fe tmp; - rustsecp256k1_v0_9_2_modinv32_signed30 s; +static int rustsecp256k1_v0_10_0_fe_impl_is_square_var(const rustsecp256k1_v0_10_0_fe *x) { + rustsecp256k1_v0_10_0_fe tmp; + rustsecp256k1_v0_10_0_modinv32_signed30 s; int jac, ret; tmp = *x; - rustsecp256k1_v0_9_2_fe_normalize_var(&tmp); - /* rustsecp256k1_v0_9_2_jacobi32_maybe_var cannot deal with input 0. */ - if (rustsecp256k1_v0_9_2_fe_is_zero(&tmp)) return 1; - rustsecp256k1_v0_9_2_fe_to_signed30(&s, &tmp); - jac = rustsecp256k1_v0_9_2_jacobi32_maybe_var(&s, &rustsecp256k1_v0_9_2_const_modinfo_fe); + rustsecp256k1_v0_10_0_fe_normalize_var(&tmp); + /* rustsecp256k1_v0_10_0_jacobi32_maybe_var cannot deal with input 0. */ + if (rustsecp256k1_v0_10_0_fe_is_zero(&tmp)) return 1; + rustsecp256k1_v0_10_0_fe_to_signed30(&s, &tmp); + jac = rustsecp256k1_v0_10_0_jacobi32_maybe_var(&s, &rustsecp256k1_v0_10_0_const_modinfo_fe); if (jac == 0) { - /* rustsecp256k1_v0_9_2_jacobi32_maybe_var failed to compute the Jacobi symbol. Fall back + /* rustsecp256k1_v0_10_0_jacobi32_maybe_var failed to compute the Jacobi symbol. Fall back * to computing a square root. This should be extremely rare with random * input (except in VERIFY mode, where a lower iteration count is used). */ - rustsecp256k1_v0_9_2_fe dummy; - ret = rustsecp256k1_v0_9_2_fe_sqrt(&dummy, &tmp); + rustsecp256k1_v0_10_0_fe dummy; + ret = rustsecp256k1_v0_10_0_fe_sqrt(&dummy, &tmp); } else { ret = jac >= 0; } diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52.h b/secp256k1-sys/depend/secp256k1/src/field_5x52.h index 5a93c9bc6..25f95b6fc 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52.h @@ -31,7 +31,7 @@ typedef struct { * (together these imply n[4] <= 2^48 - 1) */ SECP256K1_FE_VERIFY_FIELDS -} rustsecp256k1_v0_9_2_fe; +} rustsecp256k1_v0_10_0_fe; /* Unpacks a constant into a overlapping multi-limbed FE element. 
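The is_square_var implementation above decides quadratic residuosity via the Jacobi symbol (for prime p the Jacobi and Legendre symbols coincide, and by Euler's criterion a is a square iff a^((p-1)/2) = 1 mod p), falling back to an actual square-root attempt in the rare case the bounded divstep computation gives up, as its comment notes. A sketch of the expected behavior, under the same internal-API assumptions as the earlier sketches:

    /* Sketch: any square must report square. */
    static void demo_is_square(void) {
        rustsecp256k1_v0_10_0_fe x, x2;
        rustsecp256k1_v0_10_0_fe_set_int(&x, 3);
        rustsecp256k1_v0_10_0_fe_sqr(&x2, &x);       /* x2 = 9, magnitude 1 */
        VERIFY_CHECK(rustsecp256k1_v0_10_0_fe_is_square_var(&x2) == 1);
    }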
*/ #define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \ @@ -44,7 +44,7 @@ typedef struct { typedef struct { uint64_t n[4]; -} rustsecp256k1_v0_9_2_fe_storage; +} rustsecp256k1_v0_10_0_fe_storage; #define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ \ (d0) | (((uint64_t)(d1)) << 32), \ diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h b/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h deleted file mode 100644 index c0f96bd85..000000000 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h +++ /dev/null @@ -1,504 +0,0 @@ -/*********************************************************************** - * Copyright (c) 2013-2014 Diederik Huys, Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or https://www.opensource.org/licenses/mit-license.php.* - ***********************************************************************/ - -/** - * Changelog: - * - March 2013, Diederik Huys: original version - * - November 2014, Pieter Wuille: updated to use Peter Dettman's parallel multiplication algorithm - * - December 2014, Pieter Wuille: converted from YASM to GCC inline assembly - */ - -#ifndef SECP256K1_FIELD_INNER5X52_IMPL_H -#define SECP256K1_FIELD_INNER5X52_IMPL_H - -#include "util.h" - -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { -/** - * Registers: rdx:rax = multiplication accumulator - * r9:r8 = c - * r15:rcx = d - * r10-r14 = a0-a4 - * rbx = b - * rdi = r - * rsi = a / t? - */ - uint64_t tmp1, tmp2, tmp3; -__asm__ __volatile__( - "movq 0(%%rsi),%%r10\n" - "movq 8(%%rsi),%%r11\n" - "movq 16(%%rsi),%%r12\n" - "movq 24(%%rsi),%%r13\n" - "movq 32(%%rsi),%%r14\n" - - /* d += a3 * b0 */ - "movq 0(%%rbx),%%rax\n" - "mulq %%r13\n" - "movq %%rax,%%rcx\n" - "movq %%rdx,%%r15\n" - /* d += a2 * b1 */ - "movq 8(%%rbx),%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a1 * b2 */ - "movq 16(%%rbx),%%rax\n" - "mulq %%r11\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d = a0 * b3 */ - "movq 24(%%rbx),%%rax\n" - "mulq %%r10\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* c = a4 * b4 */ - "movq 32(%%rbx),%%rax\n" - "mulq %%r14\n" - "movq %%rax,%%r8\n" - "movq %%rdx,%%r9\n" - /* d += (c & M) * R */ - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* c >>= 52 (%%r8 only) */ - "shrdq $52,%%r9,%%r8\n" - /* t3 (tmp1) = d & M */ - "movq %%rcx,%%rsi\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rsi\n" - "movq %%rsi,%q1\n" - /* d >>= 52 */ - "shrdq $52,%%r15,%%rcx\n" - "xorq %%r15,%%r15\n" - /* d += a4 * b0 */ - "movq 0(%%rbx),%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a3 * b1 */ - "movq 8(%%rbx),%%rax\n" - "mulq %%r13\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a2 * b2 */ - "movq 16(%%rbx),%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a1 * b3 */ - "movq 24(%%rbx),%%rax\n" - "mulq %%r11\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a0 * b4 */ - "movq 32(%%rbx),%%rax\n" - "mulq %%r10\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += c * R */ - "movq %%r8,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* t4 = d & M (%%rsi) */ - "movq %%rcx,%%rsi\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq 
%%rdx,%%rsi\n" - /* d >>= 52 */ - "shrdq $52,%%r15,%%rcx\n" - "xorq %%r15,%%r15\n" - /* tx = t4 >> 48 (tmp3) */ - "movq %%rsi,%%rax\n" - "shrq $48,%%rax\n" - "movq %%rax,%q3\n" - /* t4 &= (M >> 4) (tmp2) */ - "movq $0xffffffffffff,%%rax\n" - "andq %%rax,%%rsi\n" - "movq %%rsi,%q2\n" - /* c = a0 * b0 */ - "movq 0(%%rbx),%%rax\n" - "mulq %%r10\n" - "movq %%rax,%%r8\n" - "movq %%rdx,%%r9\n" - /* d += a4 * b1 */ - "movq 8(%%rbx),%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a3 * b2 */ - "movq 16(%%rbx),%%rax\n" - "mulq %%r13\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a2 * b3 */ - "movq 24(%%rbx),%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a1 * b4 */ - "movq 32(%%rbx),%%rax\n" - "mulq %%r11\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* u0 = d & M (%%rsi) */ - "movq %%rcx,%%rsi\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rsi\n" - /* d >>= 52 */ - "shrdq $52,%%r15,%%rcx\n" - "xorq %%r15,%%r15\n" - /* u0 = (u0 << 4) | tx (%%rsi) */ - "shlq $4,%%rsi\n" - "movq %q3,%%rax\n" - "orq %%rax,%%rsi\n" - /* c += u0 * (R >> 4) */ - "movq $0x1000003d1,%%rax\n" - "mulq %%rsi\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* r[0] = c & M */ - "movq %%r8,%%rax\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rax\n" - "movq %%rax,0(%%rdi)\n" - /* c >>= 52 */ - "shrdq $52,%%r9,%%r8\n" - "xorq %%r9,%%r9\n" - /* c += a1 * b0 */ - "movq 0(%%rbx),%%rax\n" - "mulq %%r11\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* c += a0 * b1 */ - "movq 8(%%rbx),%%rax\n" - "mulq %%r10\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* d += a4 * b2 */ - "movq 16(%%rbx),%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a3 * b3 */ - "movq 24(%%rbx),%%rax\n" - "mulq %%r13\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a2 * b4 */ - "movq 32(%%rbx),%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* c += (d & M) * R */ - "movq %%rcx,%%rax\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* d >>= 52 */ - "shrdq $52,%%r15,%%rcx\n" - "xorq %%r15,%%r15\n" - /* r[1] = c & M */ - "movq %%r8,%%rax\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rax\n" - "movq %%rax,8(%%rdi)\n" - /* c >>= 52 */ - "shrdq $52,%%r9,%%r8\n" - "xorq %%r9,%%r9\n" - /* c += a2 * b0 */ - "movq 0(%%rbx),%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* c += a1 * b1 */ - "movq 8(%%rbx),%%rax\n" - "mulq %%r11\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* c += a0 * b2 (last use of %%r10 = a0) */ - "movq 16(%%rbx),%%rax\n" - "mulq %%r10\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* fetch t3 (%%r10, overwrites a0), t4 (%%rsi) */ - "movq %q2,%%rsi\n" - "movq %q1,%%r10\n" - /* d += a4 * b3 */ - "movq 24(%%rbx),%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a3 * b4 */ - "movq 32(%%rbx),%%rax\n" - "mulq %%r13\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* c += (d & M) * R */ - "movq %%rcx,%%rax\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* d >>= 52 (%%rcx only) */ - "shrdq $52,%%r15,%%rcx\n" - /* r[2] = c & M */ - "movq %%r8,%%rax\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rax\n" - "movq %%rax,16(%%rdi)\n" - /* c >>= 52 */ - "shrdq $52,%%r9,%%r8\n" - "xorq %%r9,%%r9\n" - /* c += t3 */ - 
"addq %%r10,%%r8\n" - /* c += d * R */ - "movq %%rcx,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* r[3] = c & M */ - "movq %%r8,%%rax\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rax\n" - "movq %%rax,24(%%rdi)\n" - /* c >>= 52 (%%r8 only) */ - "shrdq $52,%%r9,%%r8\n" - /* c += t4 (%%r8 only) */ - "addq %%rsi,%%r8\n" - /* r[4] = c */ - "movq %%r8,32(%%rdi)\n" -: "+S"(a), "=&m"(tmp1), "=&m"(tmp2), "=&m"(tmp3) -: "b"(b), "D"(r) -: "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "cc", "memory" -); -} - -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_sqr_inner(uint64_t *r, const uint64_t *a) { -/** - * Registers: rdx:rax = multiplication accumulator - * r9:r8 = c - * rcx:rbx = d - * r10-r14 = a0-a4 - * r15 = M (0xfffffffffffff) - * rdi = r - * rsi = a / t? - */ - uint64_t tmp1, tmp2, tmp3; -__asm__ __volatile__( - "movq 0(%%rsi),%%r10\n" - "movq 8(%%rsi),%%r11\n" - "movq 16(%%rsi),%%r12\n" - "movq 24(%%rsi),%%r13\n" - "movq 32(%%rsi),%%r14\n" - "movq $0xfffffffffffff,%%r15\n" - - /* d = (a0*2) * a3 */ - "leaq (%%r10,%%r10,1),%%rax\n" - "mulq %%r13\n" - "movq %%rax,%%rbx\n" - "movq %%rdx,%%rcx\n" - /* d += (a1*2) * a2 */ - "leaq (%%r11,%%r11,1),%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* c = a4 * a4 */ - "movq %%r14,%%rax\n" - "mulq %%r14\n" - "movq %%rax,%%r8\n" - "movq %%rdx,%%r9\n" - /* d += (c & M) * R */ - "andq %%r15,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* c >>= 52 (%%r8 only) */ - "shrdq $52,%%r9,%%r8\n" - /* t3 (tmp1) = d & M */ - "movq %%rbx,%%rsi\n" - "andq %%r15,%%rsi\n" - "movq %%rsi,%q1\n" - /* d >>= 52 */ - "shrdq $52,%%rcx,%%rbx\n" - "xorq %%rcx,%%rcx\n" - /* a4 *= 2 */ - "addq %%r14,%%r14\n" - /* d += a0 * a4 */ - "movq %%r10,%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* d+= (a1*2) * a3 */ - "leaq (%%r11,%%r11,1),%%rax\n" - "mulq %%r13\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* d += a2 * a2 */ - "movq %%r12,%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* d += c * R */ - "movq %%r8,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* t4 = d & M (%%rsi) */ - "movq %%rbx,%%rsi\n" - "andq %%r15,%%rsi\n" - /* d >>= 52 */ - "shrdq $52,%%rcx,%%rbx\n" - "xorq %%rcx,%%rcx\n" - /* tx = t4 >> 48 (tmp3) */ - "movq %%rsi,%%rax\n" - "shrq $48,%%rax\n" - "movq %%rax,%q3\n" - /* t4 &= (M >> 4) (tmp2) */ - "movq $0xffffffffffff,%%rax\n" - "andq %%rax,%%rsi\n" - "movq %%rsi,%q2\n" - /* c = a0 * a0 */ - "movq %%r10,%%rax\n" - "mulq %%r10\n" - "movq %%rax,%%r8\n" - "movq %%rdx,%%r9\n" - /* d += a1 * a4 */ - "movq %%r11,%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* d += (a2*2) * a3 */ - "leaq (%%r12,%%r12,1),%%rax\n" - "mulq %%r13\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* u0 = d & M (%%rsi) */ - "movq %%rbx,%%rsi\n" - "andq %%r15,%%rsi\n" - /* d >>= 52 */ - "shrdq $52,%%rcx,%%rbx\n" - "xorq %%rcx,%%rcx\n" - /* u0 = (u0 << 4) | tx (%%rsi) */ - "shlq $4,%%rsi\n" - "movq %q3,%%rax\n" - "orq %%rax,%%rsi\n" - /* c += u0 * (R >> 4) */ - "movq $0x1000003d1,%%rax\n" - "mulq %%rsi\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* r[0] = c & M */ - "movq %%r8,%%rax\n" - "andq %%r15,%%rax\n" - "movq %%rax,0(%%rdi)\n" - /* c >>= 52 */ - "shrdq $52,%%r9,%%r8\n" - "xorq %%r9,%%r9\n" - /* a0 *= 2 */ - "addq %%r10,%%r10\n" - /* c += a0 * 
a1 */ - "movq %%r10,%%rax\n" - "mulq %%r11\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* d += a2 * a4 */ - "movq %%r12,%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* d += a3 * a3 */ - "movq %%r13,%%rax\n" - "mulq %%r13\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* c += (d & M) * R */ - "movq %%rbx,%%rax\n" - "andq %%r15,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* d >>= 52 */ - "shrdq $52,%%rcx,%%rbx\n" - "xorq %%rcx,%%rcx\n" - /* r[1] = c & M */ - "movq %%r8,%%rax\n" - "andq %%r15,%%rax\n" - "movq %%rax,8(%%rdi)\n" - /* c >>= 52 */ - "shrdq $52,%%r9,%%r8\n" - "xorq %%r9,%%r9\n" - /* c += a0 * a2 (last use of %%r10) */ - "movq %%r10,%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* fetch t3 (%%r10, overwrites a0),t4 (%%rsi) */ - "movq %q2,%%rsi\n" - "movq %q1,%%r10\n" - /* c += a1 * a1 */ - "movq %%r11,%%rax\n" - "mulq %%r11\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* d += a3 * a4 */ - "movq %%r13,%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* c += (d & M) * R */ - "movq %%rbx,%%rax\n" - "andq %%r15,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* d >>= 52 (%%rbx only) */ - "shrdq $52,%%rcx,%%rbx\n" - /* r[2] = c & M */ - "movq %%r8,%%rax\n" - "andq %%r15,%%rax\n" - "movq %%rax,16(%%rdi)\n" - /* c >>= 52 */ - "shrdq $52,%%r9,%%r8\n" - "xorq %%r9,%%r9\n" - /* c += t3 */ - "addq %%r10,%%r8\n" - /* c += d * R */ - "movq %%rbx,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* r[3] = c & M */ - "movq %%r8,%%rax\n" - "andq %%r15,%%rax\n" - "movq %%rax,24(%%rdi)\n" - /* c >>= 52 (%%r8 only) */ - "shrdq $52,%%r9,%%r8\n" - /* c += t4 (%%r8 only) */ - "addq %%rsi,%%r8\n" - /* r[4] = c */ - "movq %%r8,32(%%rdi)\n" -: "+S"(a), "=&m"(tmp1), "=&m"(tmp2), "=&m"(tmp3) -: "D"(r) -: "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "cc", "memory" -); -} - -#endif /* SECP256K1_FIELD_INNER5X52_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h b/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h index 6c02437fe..8938e6517 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h @@ -12,14 +12,10 @@ #include "field.h" #include "modinv64_impl.h" -#if defined(USE_ASM_X86_64) -#include "field_5x52_asm_impl.h" -#else #include "field_5x52_int128_impl.h" -#endif #ifdef VERIFY -static void rustsecp256k1_v0_9_2_fe_impl_verify(const rustsecp256k1_v0_9_2_fe *a) { +static void rustsecp256k1_v0_10_0_fe_impl_verify(const rustsecp256k1_v0_10_0_fe *a) { const uint64_t *d = a->n; int m = a->normalized ? 1 : 2 * a->magnitude; /* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. 
*/ @@ -36,7 +32,7 @@ static void rustsecp256k1_v0_9_2_fe_impl_verify(const rustsecp256k1_v0_9_2_fe *a } #endif -static void rustsecp256k1_v0_9_2_fe_impl_get_bounds(rustsecp256k1_v0_9_2_fe *r, int m) { +static void rustsecp256k1_v0_10_0_fe_impl_get_bounds(rustsecp256k1_v0_10_0_fe *r, int m) { r->n[0] = 0xFFFFFFFFFFFFFULL * 2 * m; r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * m; r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * m; @@ -44,7 +40,7 @@ static void rustsecp256k1_v0_9_2_fe_impl_get_bounds(rustsecp256k1_v0_9_2_fe *r, r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * m; } -static void rustsecp256k1_v0_9_2_fe_impl_normalize(rustsecp256k1_v0_9_2_fe *r) { +static void rustsecp256k1_v0_10_0_fe_impl_normalize(rustsecp256k1_v0_10_0_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* Reduce t4 at the start so there will be at most a single carry from the first pass */ @@ -81,7 +77,7 @@ static void rustsecp256k1_v0_9_2_fe_impl_normalize(rustsecp256k1_v0_9_2_fe *r) { r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4; } -static void rustsecp256k1_v0_9_2_fe_impl_normalize_weak(rustsecp256k1_v0_9_2_fe *r) { +static void rustsecp256k1_v0_10_0_fe_impl_normalize_weak(rustsecp256k1_v0_10_0_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* Reduce t4 at the start so there will be at most a single carry from the first pass */ @@ -100,7 +96,7 @@ static void rustsecp256k1_v0_9_2_fe_impl_normalize_weak(rustsecp256k1_v0_9_2_fe r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4; } -static void rustsecp256k1_v0_9_2_fe_impl_normalize_var(rustsecp256k1_v0_9_2_fe *r) { +static void rustsecp256k1_v0_10_0_fe_impl_normalize_var(rustsecp256k1_v0_10_0_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* Reduce t4 at the start so there will be at most a single carry from the first pass */ @@ -138,7 +134,7 @@ static void rustsecp256k1_v0_9_2_fe_impl_normalize_var(rustsecp256k1_v0_9_2_fe * r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4; } -static int rustsecp256k1_v0_9_2_fe_impl_normalizes_to_zero(const rustsecp256k1_v0_9_2_fe *r) { +static int rustsecp256k1_v0_10_0_fe_impl_normalizes_to_zero(const rustsecp256k1_v0_10_0_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */ @@ -161,7 +157,7 @@ static int rustsecp256k1_v0_9_2_fe_impl_normalizes_to_zero(const rustsecp256k1_v return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL); } -static int rustsecp256k1_v0_9_2_fe_impl_normalizes_to_zero_var(const rustsecp256k1_v0_9_2_fe *r) { +static int rustsecp256k1_v0_10_0_fe_impl_normalizes_to_zero_var(const rustsecp256k1_v0_10_0_fe *r) { uint64_t t0, t1, t2, t3, t4; uint64_t z0, z1; uint64_t x; @@ -202,28 +198,28 @@ static int rustsecp256k1_v0_9_2_fe_impl_normalizes_to_zero_var(const rustsecp256 return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL); } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_set_int(rustsecp256k1_v0_9_2_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_set_int(rustsecp256k1_v0_10_0_fe *r, int a) { r->n[0] = a; r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0; } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_fe_impl_is_zero(const rustsecp256k1_v0_9_2_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_fe_impl_is_zero(const rustsecp256k1_v0_10_0_fe *a) { const uint64_t *t = a->n; return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0; } 
-SECP256K1_INLINE static int rustsecp256k1_v0_9_2_fe_impl_is_odd(const rustsecp256k1_v0_9_2_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_fe_impl_is_odd(const rustsecp256k1_v0_10_0_fe *a) { return a->n[0] & 1; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_clear(rustsecp256k1_v0_9_2_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_clear(rustsecp256k1_v0_10_0_fe *a) { int i; for (i=0; i<5; i++) { a->n[i] = 0; } } -static int rustsecp256k1_v0_9_2_fe_impl_cmp_var(const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe *b) { +static int rustsecp256k1_v0_10_0_fe_impl_cmp_var(const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe *b) { int i; for (i = 4; i >= 0; i--) { if (a->n[i] > b->n[i]) { @@ -236,7 +232,7 @@ static int rustsecp256k1_v0_9_2_fe_impl_cmp_var(const rustsecp256k1_v0_9_2_fe *a return 0; } -static void rustsecp256k1_v0_9_2_fe_impl_set_b32_mod(rustsecp256k1_v0_9_2_fe *r, const unsigned char *a) { +static void rustsecp256k1_v0_10_0_fe_impl_set_b32_mod(rustsecp256k1_v0_10_0_fe *r, const unsigned char *a) { r->n[0] = (uint64_t)a[31] | ((uint64_t)a[30] << 8) | ((uint64_t)a[29] << 16) @@ -273,13 +269,13 @@ static void rustsecp256k1_v0_9_2_fe_impl_set_b32_mod(rustsecp256k1_v0_9_2_fe *r, | ((uint64_t)a[0] << 40); } -static int rustsecp256k1_v0_9_2_fe_impl_set_b32_limit(rustsecp256k1_v0_9_2_fe *r, const unsigned char *a) { - rustsecp256k1_v0_9_2_fe_impl_set_b32_mod(r, a); +static int rustsecp256k1_v0_10_0_fe_impl_set_b32_limit(rustsecp256k1_v0_10_0_fe *r, const unsigned char *a) { + rustsecp256k1_v0_10_0_fe_impl_set_b32_mod(r, a); return !((r->n[4] == 0x0FFFFFFFFFFFFULL) & ((r->n[3] & r->n[2] & r->n[1]) == 0xFFFFFFFFFFFFFULL) & (r->n[0] >= 0xFFFFEFFFFFC2FULL)); } /** Convert a field element to a 32-byte big endian value. 
Requires the input to be normalized */ -static void rustsecp256k1_v0_9_2_fe_impl_get_b32(unsigned char *r, const rustsecp256k1_v0_9_2_fe *a) { +static void rustsecp256k1_v0_10_0_fe_impl_get_b32(unsigned char *r, const rustsecp256k1_v0_10_0_fe *a) { r[0] = (a->n[4] >> 40) & 0xFF; r[1] = (a->n[4] >> 32) & 0xFF; r[2] = (a->n[4] >> 24) & 0xFF; @@ -314,7 +310,7 @@ static void rustsecp256k1_v0_9_2_fe_impl_get_b32(unsigned char *r, const rustsec r[31] = a->n[0] & 0xFF; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_negate_unchecked(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, int m) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_negate_unchecked(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, int m) { /* For all legal values of m (0..31), the following properties hold: */ VERIFY_CHECK(0xFFFFEFFFFFC2FULL * 2 * (m + 1) >= 0xFFFFFFFFFFFFFULL * 2 * m); VERIFY_CHECK(0xFFFFFFFFFFFFFULL * 2 * (m + 1) >= 0xFFFFFFFFFFFFFULL * 2 * m); @@ -329,7 +325,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_negate_unchecked(rusts r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * (m + 1) - a->n[4]; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_mul_int_unchecked(rustsecp256k1_v0_9_2_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_mul_int_unchecked(rustsecp256k1_v0_10_0_fe *r, int a) { r->n[0] *= a; r->n[1] *= a; r->n[2] *= a; @@ -337,11 +333,11 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_mul_int_unchecked(rust r->n[4] *= a; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_add_int(rustsecp256k1_v0_9_2_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_add_int(rustsecp256k1_v0_10_0_fe *r, int a) { r->n[0] += a; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_add(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_add(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a) { r->n[0] += a->n[0]; r->n[1] += a->n[1]; r->n[2] += a->n[2]; @@ -349,15 +345,15 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_add(rustsecp256k1_v0_9 r->n[4] += a->n[4]; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_mul(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe * SECP256K1_RESTRICT b) { - rustsecp256k1_v0_9_2_fe_mul_inner(r->n, a->n, b->n); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_mul(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe * SECP256K1_RESTRICT b) { + rustsecp256k1_v0_10_0_fe_mul_inner(r->n, a->n, b->n); } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_sqr(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a) { - rustsecp256k1_v0_9_2_fe_sqr_inner(r->n, a->n); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_sqr(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a) { + rustsecp256k1_v0_10_0_fe_sqr_inner(r->n, a->n); } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_cmov(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, int flag) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_impl_cmov(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, int flag) { uint64_t mask0, mask1; volatile int vflag = flag; SECP256K1_CHECKMEM_CHECK_VERIFY(r->n, sizeof(r->n)); @@ -370,7 +366,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_impl_cmov(rustsecp256k1_v0_ r->n[4] = (r->n[4] & 
mask0) | (a->n[4] & mask1); } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_fe_impl_half(rustsecp256k1_v0_9_2_fe *r) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_fe_impl_half(rustsecp256k1_v0_10_0_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; uint64_t one = (uint64_t)1; uint64_t mask = -(t0 & one) >> 12; @@ -424,7 +420,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_fe_impl_half(rustsecp256k1_v0_ */ } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_fe_storage_cmov(rustsecp256k1_v0_9_2_fe_storage *r, const rustsecp256k1_v0_9_2_fe_storage *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_fe_storage_cmov(rustsecp256k1_v0_10_0_fe_storage *r, const rustsecp256k1_v0_10_0_fe_storage *a, int flag) { uint64_t mask0, mask1; volatile int vflag = flag; SECP256K1_CHECKMEM_CHECK_VERIFY(r->n, sizeof(r->n)); @@ -436,14 +432,14 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_fe_storage_cmov(rustsecp256k1_ r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1); } -static void rustsecp256k1_v0_9_2_fe_impl_to_storage(rustsecp256k1_v0_9_2_fe_storage *r, const rustsecp256k1_v0_9_2_fe *a) { +static void rustsecp256k1_v0_10_0_fe_impl_to_storage(rustsecp256k1_v0_10_0_fe_storage *r, const rustsecp256k1_v0_10_0_fe *a) { r->n[0] = a->n[0] | a->n[1] << 52; r->n[1] = a->n[1] >> 12 | a->n[2] << 40; r->n[2] = a->n[2] >> 24 | a->n[3] << 28; r->n[3] = a->n[3] >> 36 | a->n[4] << 16; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_fe_impl_from_storage(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe_storage *a) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_fe_impl_from_storage(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe_storage *a) { r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL; r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL); r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL); @@ -451,11 +447,11 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_fe_impl_from_storage(rustsecp2 r->n[4] = a->n[3] >> 16; } -static void rustsecp256k1_v0_9_2_fe_from_signed62(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_modinv64_signed62 *a) { +static void rustsecp256k1_v0_10_0_fe_from_signed62(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_modinv64_signed62 *a) { const uint64_t M52 = UINT64_MAX >> 12; const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4]; - /* The output from rustsecp256k1_v0_9_2_modinv64{_var} should be normalized to range [0,modulus), and + /* The output from rustsecp256k1_v0_10_0_modinv64{_var} should be normalized to range [0,modulus), and * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4). 
*/ VERIFY_CHECK(a0 >> 62 == 0); @@ -471,7 +467,7 @@ static void rustsecp256k1_v0_9_2_fe_from_signed62(rustsecp256k1_v0_9_2_fe *r, co r->n[4] = (a3 >> 22 | a4 << 40); } -static void rustsecp256k1_v0_9_2_fe_to_signed62(rustsecp256k1_v0_9_2_modinv64_signed62 *r, const rustsecp256k1_v0_9_2_fe *a) { +static void rustsecp256k1_v0_10_0_fe_to_signed62(rustsecp256k1_v0_10_0_modinv64_signed62 *r, const rustsecp256k1_v0_10_0_fe *a) { const uint64_t M62 = UINT64_MAX >> 2; const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4]; @@ -482,48 +478,48 @@ static void rustsecp256k1_v0_9_2_fe_to_signed62(rustsecp256k1_v0_9_2_modinv64_si r->v[4] = a4 >> 40; } -static const rustsecp256k1_v0_9_2_modinv64_modinfo rustsecp256k1_v0_9_2_const_modinfo_fe = { +static const rustsecp256k1_v0_10_0_modinv64_modinfo rustsecp256k1_v0_10_0_const_modinfo_fe = { {{-0x1000003D1LL, 0, 0, 0, 256}}, 0x27C7F6E22DDACACFLL }; -static void rustsecp256k1_v0_9_2_fe_impl_inv(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *x) { - rustsecp256k1_v0_9_2_fe tmp = *x; - rustsecp256k1_v0_9_2_modinv64_signed62 s; +static void rustsecp256k1_v0_10_0_fe_impl_inv(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *x) { + rustsecp256k1_v0_10_0_fe tmp = *x; + rustsecp256k1_v0_10_0_modinv64_signed62 s; - rustsecp256k1_v0_9_2_fe_normalize(&tmp); - rustsecp256k1_v0_9_2_fe_to_signed62(&s, &tmp); - rustsecp256k1_v0_9_2_modinv64(&s, &rustsecp256k1_v0_9_2_const_modinfo_fe); - rustsecp256k1_v0_9_2_fe_from_signed62(r, &s); + rustsecp256k1_v0_10_0_fe_normalize(&tmp); + rustsecp256k1_v0_10_0_fe_to_signed62(&s, &tmp); + rustsecp256k1_v0_10_0_modinv64(&s, &rustsecp256k1_v0_10_0_const_modinfo_fe); + rustsecp256k1_v0_10_0_fe_from_signed62(r, &s); } -static void rustsecp256k1_v0_9_2_fe_impl_inv_var(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *x) { - rustsecp256k1_v0_9_2_fe tmp = *x; - rustsecp256k1_v0_9_2_modinv64_signed62 s; +static void rustsecp256k1_v0_10_0_fe_impl_inv_var(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *x) { + rustsecp256k1_v0_10_0_fe tmp = *x; + rustsecp256k1_v0_10_0_modinv64_signed62 s; - rustsecp256k1_v0_9_2_fe_normalize_var(&tmp); - rustsecp256k1_v0_9_2_fe_to_signed62(&s, &tmp); - rustsecp256k1_v0_9_2_modinv64_var(&s, &rustsecp256k1_v0_9_2_const_modinfo_fe); - rustsecp256k1_v0_9_2_fe_from_signed62(r, &s); + rustsecp256k1_v0_10_0_fe_normalize_var(&tmp); + rustsecp256k1_v0_10_0_fe_to_signed62(&s, &tmp); + rustsecp256k1_v0_10_0_modinv64_var(&s, &rustsecp256k1_v0_10_0_const_modinfo_fe); + rustsecp256k1_v0_10_0_fe_from_signed62(r, &s); } -static int rustsecp256k1_v0_9_2_fe_impl_is_square_var(const rustsecp256k1_v0_9_2_fe *x) { - rustsecp256k1_v0_9_2_fe tmp; - rustsecp256k1_v0_9_2_modinv64_signed62 s; +static int rustsecp256k1_v0_10_0_fe_impl_is_square_var(const rustsecp256k1_v0_10_0_fe *x) { + rustsecp256k1_v0_10_0_fe tmp; + rustsecp256k1_v0_10_0_modinv64_signed62 s; int jac, ret; tmp = *x; - rustsecp256k1_v0_9_2_fe_normalize_var(&tmp); - /* rustsecp256k1_v0_9_2_jacobi64_maybe_var cannot deal with input 0. */ - if (rustsecp256k1_v0_9_2_fe_is_zero(&tmp)) return 1; - rustsecp256k1_v0_9_2_fe_to_signed62(&s, &tmp); - jac = rustsecp256k1_v0_9_2_jacobi64_maybe_var(&s, &rustsecp256k1_v0_9_2_const_modinfo_fe); + rustsecp256k1_v0_10_0_fe_normalize_var(&tmp); + /* rustsecp256k1_v0_10_0_jacobi64_maybe_var cannot deal with input 0. 
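The modinv64_modinfo constant above stores p in signed 62-bit limbs, and the encoding can be verified directly:

    p = -0x1000003D1 + 256 * 2^(62*4)
      = 2^256 - (2^32 + 0x3D1)
      = 2^256 - 2^32 - 977

since 256 * 2^248 = 2^256 and 0x1000003D1 = 2^32 + 0x3D1. The companion word 0x27C7F6E22DDACACF is the modular inverse of p that the divstep iteration needs (mod 2^62 here; the 32-bit table's 0x2DDACACF plays the same role mod 2^30).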
*/ + if (rustsecp256k1_v0_10_0_fe_is_zero(&tmp)) return 1; + rustsecp256k1_v0_10_0_fe_to_signed62(&s, &tmp); + jac = rustsecp256k1_v0_10_0_jacobi64_maybe_var(&s, &rustsecp256k1_v0_10_0_const_modinfo_fe); if (jac == 0) { - /* rustsecp256k1_v0_9_2_jacobi64_maybe_var failed to compute the Jacobi symbol. Fall back + /* rustsecp256k1_v0_10_0_jacobi64_maybe_var failed to compute the Jacobi symbol. Fall back * to computing a square root. This should be extremely rare with random * input (except in VERIFY mode, where a lower iteration count is used). */ - rustsecp256k1_v0_9_2_fe dummy; - ret = rustsecp256k1_v0_9_2_fe_sqrt(&dummy, &tmp); + rustsecp256k1_v0_10_0_fe dummy; + ret = rustsecp256k1_v0_10_0_fe_sqrt(&dummy, &tmp); } else { ret = jac >= 0; } diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h b/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h index 1910e92f4..aad6ad4bd 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h @@ -12,16 +12,11 @@ #include "int128.h" #include "util.h" -#ifdef VERIFY #define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0) -#define VERIFY_BITS_128(x, n) VERIFY_CHECK(rustsecp256k1_v0_9_2_u128_check_bits((x), (n))) -#else -#define VERIFY_BITS(x, n) do { } while(0) -#define VERIFY_BITS_128(x, n) do { } while(0) -#endif +#define VERIFY_BITS_128(x, n) VERIFY_CHECK(rustsecp256k1_v0_10_0_u128_check_bits((x), (n))) -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { - rustsecp256k1_v0_9_2_uint128 c, d; +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { + rustsecp256k1_v0_10_0_uint128 c, d; uint64_t t3, t4, tx, u0; uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4]; const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL; @@ -45,35 +40,35 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_mul_inner(uint64_t *r, cons * Note that [x 0 0 0 0 0] = [x*R]. 
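Context for the constant R = 0x1000003D10 in fe_mul_inner: the field prime is p = 2^256 - 0x1000003D1, so 2^256 == 0x1000003D1 (mod p), and high limbs of a product fold back into the low limbs with one small multiplication instead of a division (R is that constant shifted by 4 bits to line up with the 52-bit limbs). The same idea at 64-bit scale, using the pseudo-Mersenne prime 2^64 - 59; the names C, P and fold128 are made up for this sketch:

    #include <assert.h>
    #include <stdint.h>

    #define C 59ULL                             /* 2^64 - 59 is prime */
    static const uint64_t P = (uint64_t)0 - C;  /* wraps to 2^64 - C */

    /* Pseudo-Mersenne reduction: 2^64 == C (mod P), so hi*2^64 + lo
     * can be folded to hi*C + lo until the value fits in 64 bits. */
    static uint64_t fold128(unsigned __int128 x) {
        while (x >> 64)
            x = (unsigned __int128)(uint64_t)(x >> 64) * C + (uint64_t)x;
        return (uint64_t)x >= P ? (uint64_t)x - P : (uint64_t)x;
    }

    int main(void) {
        assert(fold128(P) == 0);
        assert(fold128((unsigned __int128)P + 1) == 1);
        assert(fold128((unsigned __int128)P * P) == 0);  /* P^2 == 0 (mod P) */
        return 0;
    }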
*/ - rustsecp256k1_v0_9_2_u128_mul(&d, a0, b[3]); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a1, b[2]); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a2, b[1]); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a3, b[0]); + rustsecp256k1_v0_10_0_u128_mul(&d, a0, b[3]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a1, b[2]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a2, b[1]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a3, b[0]); VERIFY_BITS_128(&d, 114); /* [d 0 0 0] = [p3 0 0 0] */ - rustsecp256k1_v0_9_2_u128_mul(&c, a4, b[4]); + rustsecp256k1_v0_10_0_u128_mul(&c, a4, b[4]); VERIFY_BITS_128(&c, 112); /* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&d, R, rustsecp256k1_v0_9_2_u128_to_u64(&c)); rustsecp256k1_v0_9_2_u128_rshift(&c, 64); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, R, rustsecp256k1_v0_10_0_u128_to_u64(&c)); rustsecp256k1_v0_10_0_u128_rshift(&c, 64); VERIFY_BITS_128(&d, 115); VERIFY_BITS_128(&c, 48); /* [(c<<12) 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - t3 = rustsecp256k1_v0_9_2_u128_to_u64(&d) & M; rustsecp256k1_v0_9_2_u128_rshift(&d, 52); + t3 = rustsecp256k1_v0_10_0_u128_to_u64(&d) & M; rustsecp256k1_v0_10_0_u128_rshift(&d, 52); VERIFY_BITS(t3, 52); VERIFY_BITS_128(&d, 63); /* [(c<<12) 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a0, b[4]); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a1, b[3]); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a2, b[2]); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a3, b[1]); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a4, b[0]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a0, b[4]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a1, b[3]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a2, b[2]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a3, b[1]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a4, b[0]); VERIFY_BITS_128(&d, 115); /* [(c<<12) 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&d, R << 12, rustsecp256k1_v0_9_2_u128_to_u64(&c)); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, R << 12, rustsecp256k1_v0_10_0_u128_to_u64(&c)); VERIFY_BITS_128(&d, 116); /* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - t4 = rustsecp256k1_v0_9_2_u128_to_u64(&d) & M; rustsecp256k1_v0_9_2_u128_rshift(&d, 52); + t4 = rustsecp256k1_v0_10_0_u128_to_u64(&d) & M; rustsecp256k1_v0_10_0_u128_rshift(&d, 52); VERIFY_BITS(t4, 52); VERIFY_BITS_128(&d, 64); /* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ @@ -82,84 +77,84 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_mul_inner(uint64_t *r, cons VERIFY_BITS(t4, 48); /* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - rustsecp256k1_v0_9_2_u128_mul(&c, a0, b[0]); + rustsecp256k1_v0_10_0_u128_mul(&c, a0, b[0]); VERIFY_BITS_128(&c, 112); /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a1, b[4]); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a2, b[3]); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a3, b[2]); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a4, b[1]); - VERIFY_BITS_128(&d, 115); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a1, b[4]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a2, b[3]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a3, b[2]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a4, b[1]); + VERIFY_BITS_128(&d, 114); /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - u0 = rustsecp256k1_v0_9_2_u128_to_u64(&d) & M; rustsecp256k1_v0_9_2_u128_rshift(&d, 52); + u0 = rustsecp256k1_v0_10_0_u128_to_u64(&d) & M; rustsecp256k1_v0_10_0_u128_rshift(&d, 52); VERIFY_BITS(u0, 52); - 
VERIFY_BITS_128(&d, 63); + VERIFY_BITS_128(&d, 62); /* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ /* [d 0 t4+(tx<<48)+(u0<<52) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ u0 = (u0 << 4) | tx; VERIFY_BITS(u0, 56); /* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&c, u0, R >> 4); - VERIFY_BITS_128(&c, 115); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, u0, R >> 4); + VERIFY_BITS_128(&c, 113); /* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - r[0] = rustsecp256k1_v0_9_2_u128_to_u64(&c) & M; rustsecp256k1_v0_9_2_u128_rshift(&c, 52); + r[0] = rustsecp256k1_v0_10_0_u128_to_u64(&c) & M; rustsecp256k1_v0_10_0_u128_rshift(&c, 52); VERIFY_BITS(r[0], 52); VERIFY_BITS_128(&c, 61); /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&c, a0, b[1]); - rustsecp256k1_v0_9_2_u128_accum_mul(&c, a1, b[0]); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, a0, b[1]); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, a1, b[0]); VERIFY_BITS_128(&c, 114); /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a2, b[4]); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a3, b[3]); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a4, b[2]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a2, b[4]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a3, b[3]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a4, b[2]); VERIFY_BITS_128(&d, 114); /* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&c, rustsecp256k1_v0_9_2_u128_to_u64(&d) & M, R); rustsecp256k1_v0_9_2_u128_rshift(&d, 52); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, rustsecp256k1_v0_10_0_u128_to_u64(&d) & M, R); rustsecp256k1_v0_10_0_u128_rshift(&d, 52); VERIFY_BITS_128(&c, 115); VERIFY_BITS_128(&d, 62); /* [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - r[1] = rustsecp256k1_v0_9_2_u128_to_u64(&c) & M; rustsecp256k1_v0_9_2_u128_rshift(&c, 52); + r[1] = rustsecp256k1_v0_10_0_u128_to_u64(&c) & M; rustsecp256k1_v0_10_0_u128_rshift(&c, 52); VERIFY_BITS(r[1], 52); VERIFY_BITS_128(&c, 63); /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&c, a0, b[2]); - rustsecp256k1_v0_9_2_u128_accum_mul(&c, a1, b[1]); - rustsecp256k1_v0_9_2_u128_accum_mul(&c, a2, b[0]); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, a0, b[2]); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, a1, b[1]); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, a2, b[0]); VERIFY_BITS_128(&c, 114); /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a3, b[4]); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a4, b[3]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a3, b[4]); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a4, b[3]); VERIFY_BITS_128(&d, 114); /* [d 0 0 t4 t3 c t1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&c, R, rustsecp256k1_v0_9_2_u128_to_u64(&d)); rustsecp256k1_v0_9_2_u128_rshift(&d, 64); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, R, rustsecp256k1_v0_10_0_u128_to_u64(&d)); rustsecp256k1_v0_10_0_u128_rshift(&d, 64); VERIFY_BITS_128(&c, 115); VERIFY_BITS_128(&d, 50); /* [(d<<12) 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[2] = rustsecp256k1_v0_9_2_u128_to_u64(&c) & M; rustsecp256k1_v0_9_2_u128_rshift(&c, 52); + r[2] = rustsecp256k1_v0_10_0_u128_to_u64(&c) & M; rustsecp256k1_v0_10_0_u128_rshift(&c, 52); VERIFY_BITS(r[2], 52); VERIFY_BITS_128(&c, 63); /* [(d<<12) 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - 
rustsecp256k1_v0_9_2_u128_accum_mul(&c, R << 12, rustsecp256k1_v0_9_2_u128_to_u64(&d)); - rustsecp256k1_v0_9_2_u128_accum_u64(&c, t3); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, R << 12, rustsecp256k1_v0_10_0_u128_to_u64(&d)); + rustsecp256k1_v0_10_0_u128_accum_u64(&c, t3); VERIFY_BITS_128(&c, 100); /* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[3] = rustsecp256k1_v0_9_2_u128_to_u64(&c) & M; rustsecp256k1_v0_9_2_u128_rshift(&c, 52); + r[3] = rustsecp256k1_v0_10_0_u128_to_u64(&c) & M; rustsecp256k1_v0_10_0_u128_rshift(&c, 52); VERIFY_BITS(r[3], 52); VERIFY_BITS_128(&c, 48); /* [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[4] = rustsecp256k1_v0_9_2_u128_to_u64(&c) + t4; + r[4] = rustsecp256k1_v0_10_0_u128_to_u64(&c) + t4; VERIFY_BITS(r[4], 49); /* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_sqr_inner(uint64_t *r, const uint64_t *a) { - rustsecp256k1_v0_9_2_uint128 c, d; +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) { + rustsecp256k1_v0_10_0_uint128 c, d; uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4]; - int64_t t3, t4, tx, u0; + uint64_t t3, t4, tx, u0; const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL; VERIFY_BITS(a[0], 56); @@ -173,32 +168,32 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_sqr_inner(uint64_t *r, cons * Note that [x 0 0 0 0 0] = [x*R]. */ - rustsecp256k1_v0_9_2_u128_mul(&d, a0*2, a3); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a1*2, a2); + rustsecp256k1_v0_10_0_u128_mul(&d, a0*2, a3); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a1*2, a2); VERIFY_BITS_128(&d, 114); /* [d 0 0 0] = [p3 0 0 0] */ - rustsecp256k1_v0_9_2_u128_mul(&c, a4, a4); + rustsecp256k1_v0_10_0_u128_mul(&c, a4, a4); VERIFY_BITS_128(&c, 112); /* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&d, R, rustsecp256k1_v0_9_2_u128_to_u64(&c)); rustsecp256k1_v0_9_2_u128_rshift(&c, 64); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, R, rustsecp256k1_v0_10_0_u128_to_u64(&c)); rustsecp256k1_v0_10_0_u128_rshift(&c, 64); VERIFY_BITS_128(&d, 115); VERIFY_BITS_128(&c, 48); /* [(c<<12) 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - t3 = rustsecp256k1_v0_9_2_u128_to_u64(&d) & M; rustsecp256k1_v0_9_2_u128_rshift(&d, 52); + t3 = rustsecp256k1_v0_10_0_u128_to_u64(&d) & M; rustsecp256k1_v0_10_0_u128_rshift(&d, 52); VERIFY_BITS(t3, 52); VERIFY_BITS_128(&d, 63); /* [(c<<12) 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ a4 *= 2; - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a0, a4); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a1*2, a3); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a2, a2); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a0, a4); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a1*2, a3); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a2, a2); VERIFY_BITS_128(&d, 115); /* [(c<<12) 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&d, R << 12, rustsecp256k1_v0_9_2_u128_to_u64(&c)); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, R << 12, rustsecp256k1_v0_10_0_u128_to_u64(&c)); VERIFY_BITS_128(&d, 116); /* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - t4 = rustsecp256k1_v0_9_2_u128_to_u64(&d) & M; rustsecp256k1_v0_9_2_u128_rshift(&d, 52); + t4 = rustsecp256k1_v0_10_0_u128_to_u64(&d) & M; rustsecp256k1_v0_10_0_u128_rshift(&d, 52); VERIFY_BITS(t4, 52); VERIFY_BITS_128(&d, 64); /* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ @@ -207,14 +202,14 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_sqr_inner(uint64_t *r, 
cons VERIFY_BITS(t4, 48); /* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - rustsecp256k1_v0_9_2_u128_mul(&c, a0, a0); + rustsecp256k1_v0_10_0_u128_mul(&c, a0, a0); VERIFY_BITS_128(&c, 112); /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a1, a4); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a2*2, a3); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a1, a4); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a2*2, a3); VERIFY_BITS_128(&d, 114); /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - u0 = rustsecp256k1_v0_9_2_u128_to_u64(&d) & M; rustsecp256k1_v0_9_2_u128_rshift(&d, 52); + u0 = rustsecp256k1_v0_10_0_u128_to_u64(&d) & M; rustsecp256k1_v0_10_0_u128_rshift(&d, 52); VERIFY_BITS(u0, 52); VERIFY_BITS_128(&d, 62); /* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ @@ -222,56 +217,56 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_sqr_inner(uint64_t *r, cons u0 = (u0 << 4) | tx; VERIFY_BITS(u0, 56); /* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&c, u0, R >> 4); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, u0, R >> 4); VERIFY_BITS_128(&c, 113); /* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - r[0] = rustsecp256k1_v0_9_2_u128_to_u64(&c) & M; rustsecp256k1_v0_9_2_u128_rshift(&c, 52); + r[0] = rustsecp256k1_v0_10_0_u128_to_u64(&c) & M; rustsecp256k1_v0_10_0_u128_rshift(&c, 52); VERIFY_BITS(r[0], 52); VERIFY_BITS_128(&c, 61); /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */ a0 *= 2; - rustsecp256k1_v0_9_2_u128_accum_mul(&c, a0, a1); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, a0, a1); VERIFY_BITS_128(&c, 114); /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a2, a4); - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a3, a3); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a2, a4); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a3, a3); VERIFY_BITS_128(&d, 114); /* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&c, rustsecp256k1_v0_9_2_u128_to_u64(&d) & M, R); rustsecp256k1_v0_9_2_u128_rshift(&d, 52); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, rustsecp256k1_v0_10_0_u128_to_u64(&d) & M, R); rustsecp256k1_v0_10_0_u128_rshift(&d, 52); VERIFY_BITS_128(&c, 115); VERIFY_BITS_128(&d, 62); /* [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - r[1] = rustsecp256k1_v0_9_2_u128_to_u64(&c) & M; rustsecp256k1_v0_9_2_u128_rshift(&c, 52); + r[1] = rustsecp256k1_v0_10_0_u128_to_u64(&c) & M; rustsecp256k1_v0_10_0_u128_rshift(&c, 52); VERIFY_BITS(r[1], 52); VERIFY_BITS_128(&c, 63); /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&c, a0, a2); - rustsecp256k1_v0_9_2_u128_accum_mul(&c, a1, a1); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, a0, a2); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, a1, a1); VERIFY_BITS_128(&c, 114); /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&d, a3, a4); + rustsecp256k1_v0_10_0_u128_accum_mul(&d, a3, a4); VERIFY_BITS_128(&d, 114); /* [d 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&c, R, rustsecp256k1_v0_9_2_u128_to_u64(&d)); rustsecp256k1_v0_9_2_u128_rshift(&d, 64); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, R, rustsecp256k1_v0_10_0_u128_to_u64(&d)); rustsecp256k1_v0_10_0_u128_rshift(&d, 64); VERIFY_BITS_128(&c, 115); VERIFY_BITS_128(&d, 50); /* [(d<<12) 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[2] = 
rustsecp256k1_v0_9_2_u128_to_u64(&c) & M; rustsecp256k1_v0_9_2_u128_rshift(&c, 52); + r[2] = rustsecp256k1_v0_10_0_u128_to_u64(&c) & M; rustsecp256k1_v0_10_0_u128_rshift(&c, 52); VERIFY_BITS(r[2], 52); VERIFY_BITS_128(&c, 63); /* [(d<<12) 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - rustsecp256k1_v0_9_2_u128_accum_mul(&c, R << 12, rustsecp256k1_v0_9_2_u128_to_u64(&d)); - rustsecp256k1_v0_9_2_u128_accum_u64(&c, t3); + rustsecp256k1_v0_10_0_u128_accum_mul(&c, R << 12, rustsecp256k1_v0_10_0_u128_to_u64(&d)); + rustsecp256k1_v0_10_0_u128_accum_u64(&c, t3); VERIFY_BITS_128(&c, 100); /* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[3] = rustsecp256k1_v0_9_2_u128_to_u64(&c) & M; rustsecp256k1_v0_9_2_u128_rshift(&c, 52); + r[3] = rustsecp256k1_v0_10_0_u128_to_u64(&c) & M; rustsecp256k1_v0_10_0_u128_rshift(&c, 52); VERIFY_BITS(r[3], 52); VERIFY_BITS_128(&c, 48); /* [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[4] = rustsecp256k1_v0_9_2_u128_to_u64(&c) + t4; + r[4] = rustsecp256k1_v0_10_0_u128_to_u64(&c) + t4; VERIFY_BITS(r[4], 49); /* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ } diff --git a/secp256k1-sys/depend/secp256k1/src/field_impl.h b/secp256k1-sys/depend/secp256k1/src/field_impl.h index 08c094643..0943f959b 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_impl.h @@ -18,20 +18,19 @@ #error "Please select wide multiplication implementation" #endif -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_fe_equal(const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe *b) { - rustsecp256k1_v0_9_2_fe na; -#ifdef VERIFY - rustsecp256k1_v0_9_2_fe_verify(a); - rustsecp256k1_v0_9_2_fe_verify(b); - rustsecp256k1_v0_9_2_fe_verify_magnitude(a, 1); - rustsecp256k1_v0_9_2_fe_verify_magnitude(b, 31); -#endif - rustsecp256k1_v0_9_2_fe_negate(&na, a, 1); - rustsecp256k1_v0_9_2_fe_add(&na, b); - return rustsecp256k1_v0_9_2_fe_normalizes_to_zero(&na); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_fe_equal(const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe *b) { + rustsecp256k1_v0_10_0_fe na; + SECP256K1_FE_VERIFY(a); + SECP256K1_FE_VERIFY(b); + SECP256K1_FE_VERIFY_MAGNITUDE(a, 1); + SECP256K1_FE_VERIFY_MAGNITUDE(b, 31); + + rustsecp256k1_v0_10_0_fe_negate(&na, a, 1); + rustsecp256k1_v0_10_0_fe_add(&na, b); + return rustsecp256k1_v0_10_0_fe_normalizes_to_zero(&na); } -static int rustsecp256k1_v0_9_2_fe_sqrt(rustsecp256k1_v0_9_2_fe * SECP256K1_RESTRICT r, const rustsecp256k1_v0_9_2_fe * SECP256K1_RESTRICT a) { +static int rustsecp256k1_v0_10_0_fe_sqrt(rustsecp256k1_v0_10_0_fe * SECP256K1_RESTRICT r, const rustsecp256k1_v0_10_0_fe * SECP256K1_RESTRICT a) { /** Given that p is congruent to 3 mod 4, we can compute the square root of * a mod p as the (p+1)/4'th power of a. * @@ -41,233 +40,248 @@ static int rustsecp256k1_v0_9_2_fe_sqrt(rustsecp256k1_v0_9_2_fe * SECP256K1_REST * Also because (p+1)/4 is an even number, the computed square root is * itself always a square (a ** ((p+1)/4) is the square of a ** ((p+1)/8)). */ - rustsecp256k1_v0_9_2_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; + rustsecp256k1_v0_10_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; int j, ret; -#ifdef VERIFY VERIFY_CHECK(r != a); - rustsecp256k1_v0_9_2_fe_verify(a); - rustsecp256k1_v0_9_2_fe_verify_magnitude(a, 8); -#endif + SECP256K1_FE_VERIFY(a); + SECP256K1_FE_VERIFY_MAGNITUDE(a, 8); /** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in * { 2, 22, 223 }. 
Use an addition chain to calculate 2^n - 1 for each block: * 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] */ - rustsecp256k1_v0_9_2_fe_sqr(&x2, a); - rustsecp256k1_v0_9_2_fe_mul(&x2, &x2, a); + rustsecp256k1_v0_10_0_fe_sqr(&x2, a); + rustsecp256k1_v0_10_0_fe_mul(&x2, &x2, a); - rustsecp256k1_v0_9_2_fe_sqr(&x3, &x2); - rustsecp256k1_v0_9_2_fe_mul(&x3, &x3, a); + rustsecp256k1_v0_10_0_fe_sqr(&x3, &x2); + rustsecp256k1_v0_10_0_fe_mul(&x3, &x3, a); x6 = x3; for (j=0; j<3; j++) { - rustsecp256k1_v0_9_2_fe_sqr(&x6, &x6); + rustsecp256k1_v0_10_0_fe_sqr(&x6, &x6); } - rustsecp256k1_v0_9_2_fe_mul(&x6, &x6, &x3); + rustsecp256k1_v0_10_0_fe_mul(&x6, &x6, &x3); x9 = x6; for (j=0; j<3; j++) { - rustsecp256k1_v0_9_2_fe_sqr(&x9, &x9); + rustsecp256k1_v0_10_0_fe_sqr(&x9, &x9); } - rustsecp256k1_v0_9_2_fe_mul(&x9, &x9, &x3); + rustsecp256k1_v0_10_0_fe_mul(&x9, &x9, &x3); x11 = x9; for (j=0; j<2; j++) { - rustsecp256k1_v0_9_2_fe_sqr(&x11, &x11); + rustsecp256k1_v0_10_0_fe_sqr(&x11, &x11); } - rustsecp256k1_v0_9_2_fe_mul(&x11, &x11, &x2); + rustsecp256k1_v0_10_0_fe_mul(&x11, &x11, &x2); x22 = x11; for (j=0; j<11; j++) { - rustsecp256k1_v0_9_2_fe_sqr(&x22, &x22); + rustsecp256k1_v0_10_0_fe_sqr(&x22, &x22); } - rustsecp256k1_v0_9_2_fe_mul(&x22, &x22, &x11); + rustsecp256k1_v0_10_0_fe_mul(&x22, &x22, &x11); x44 = x22; for (j=0; j<22; j++) { - rustsecp256k1_v0_9_2_fe_sqr(&x44, &x44); + rustsecp256k1_v0_10_0_fe_sqr(&x44, &x44); } - rustsecp256k1_v0_9_2_fe_mul(&x44, &x44, &x22); + rustsecp256k1_v0_10_0_fe_mul(&x44, &x44, &x22); x88 = x44; for (j=0; j<44; j++) { - rustsecp256k1_v0_9_2_fe_sqr(&x88, &x88); + rustsecp256k1_v0_10_0_fe_sqr(&x88, &x88); } - rustsecp256k1_v0_9_2_fe_mul(&x88, &x88, &x44); + rustsecp256k1_v0_10_0_fe_mul(&x88, &x88, &x44); x176 = x88; for (j=0; j<88; j++) { - rustsecp256k1_v0_9_2_fe_sqr(&x176, &x176); + rustsecp256k1_v0_10_0_fe_sqr(&x176, &x176); } - rustsecp256k1_v0_9_2_fe_mul(&x176, &x176, &x88); + rustsecp256k1_v0_10_0_fe_mul(&x176, &x176, &x88); x220 = x176; for (j=0; j<44; j++) { - rustsecp256k1_v0_9_2_fe_sqr(&x220, &x220); + rustsecp256k1_v0_10_0_fe_sqr(&x220, &x220); } - rustsecp256k1_v0_9_2_fe_mul(&x220, &x220, &x44); + rustsecp256k1_v0_10_0_fe_mul(&x220, &x220, &x44); x223 = x220; for (j=0; j<3; j++) { - rustsecp256k1_v0_9_2_fe_sqr(&x223, &x223); + rustsecp256k1_v0_10_0_fe_sqr(&x223, &x223); } - rustsecp256k1_v0_9_2_fe_mul(&x223, &x223, &x3); + rustsecp256k1_v0_10_0_fe_mul(&x223, &x223, &x3); /* The final result is then assembled using a sliding window over the blocks. 
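For context on the exponentiation above: because p == 3 (mod 4), the candidate root is a^((p+1)/4), and re-squaring the candidate (the "Check that a square root was actually calculated" step just below) is what separates squares from non-squares. The same computation over a toy prime, variable time, with hypothetical helper names:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t powmod(uint64_t b, uint64_t e, uint64_t m) {
        unsigned __int128 acc = 1, base = b % m;
        while (e) {
            if (e & 1) acc = acc * base % m;
            base = base * base % m;
            e >>= 1;
        }
        return (uint64_t)acc;
    }

    /* For p == 3 (mod 4): a^((p+1)/4) is a square root of a whenever one
     * exists; squaring the result tells us whether it actually does. */
    static int sqrt_mod(uint64_t *r, uint64_t a, uint64_t p) {
        *r = powmod(a, (p + 1) / 4, p);
        return (uint64_t)((unsigned __int128)*r * *r % p) == a % p;
    }

    int main(void) {
        uint64_t s;
        assert(sqrt_mod(&s, 2, 23) && s == 18);  /* 18*18 == 324 == 2 (mod 23) */
        assert(!sqrt_mod(&s, 5, 23));            /* 5 is not a square mod 23 */
        return 0;
    }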
*/ t1 = x223; for (j=0; j<23; j++) { - rustsecp256k1_v0_9_2_fe_sqr(&t1, &t1); + rustsecp256k1_v0_10_0_fe_sqr(&t1, &t1); } - rustsecp256k1_v0_9_2_fe_mul(&t1, &t1, &x22); + rustsecp256k1_v0_10_0_fe_mul(&t1, &t1, &x22); for (j=0; j<6; j++) { - rustsecp256k1_v0_9_2_fe_sqr(&t1, &t1); + rustsecp256k1_v0_10_0_fe_sqr(&t1, &t1); } - rustsecp256k1_v0_9_2_fe_mul(&t1, &t1, &x2); - rustsecp256k1_v0_9_2_fe_sqr(&t1, &t1); - rustsecp256k1_v0_9_2_fe_sqr(r, &t1); + rustsecp256k1_v0_10_0_fe_mul(&t1, &t1, &x2); + rustsecp256k1_v0_10_0_fe_sqr(&t1, &t1); + rustsecp256k1_v0_10_0_fe_sqr(r, &t1); /* Check that a square root was actually calculated */ - rustsecp256k1_v0_9_2_fe_sqr(&t1, r); - ret = rustsecp256k1_v0_9_2_fe_equal(&t1, a); + rustsecp256k1_v0_10_0_fe_sqr(&t1, r); + ret = rustsecp256k1_v0_10_0_fe_equal(&t1, a); #ifdef VERIFY if (!ret) { - rustsecp256k1_v0_9_2_fe_negate(&t1, &t1, 1); - rustsecp256k1_v0_9_2_fe_normalize_var(&t1); - VERIFY_CHECK(rustsecp256k1_v0_9_2_fe_equal(&t1, a)); + rustsecp256k1_v0_10_0_fe_negate(&t1, &t1, 1); + rustsecp256k1_v0_10_0_fe_normalize_var(&t1); + VERIFY_CHECK(rustsecp256k1_v0_10_0_fe_equal(&t1, a)); } #endif return ret; } #ifndef VERIFY -static void rustsecp256k1_v0_9_2_fe_verify(const rustsecp256k1_v0_9_2_fe *a) { (void)a; } -static void rustsecp256k1_v0_9_2_fe_verify_magnitude(const rustsecp256k1_v0_9_2_fe *a, int m) { (void)a; (void)m; } +static void rustsecp256k1_v0_10_0_fe_verify(const rustsecp256k1_v0_10_0_fe *a) { (void)a; } +static void rustsecp256k1_v0_10_0_fe_verify_magnitude(const rustsecp256k1_v0_10_0_fe *a, int m) { (void)a; (void)m; } #else -static void rustsecp256k1_v0_9_2_fe_impl_verify(const rustsecp256k1_v0_9_2_fe *a); -static void rustsecp256k1_v0_9_2_fe_verify(const rustsecp256k1_v0_9_2_fe *a) { +static void rustsecp256k1_v0_10_0_fe_impl_verify(const rustsecp256k1_v0_10_0_fe *a); +static void rustsecp256k1_v0_10_0_fe_verify(const rustsecp256k1_v0_10_0_fe *a) { /* Magnitude between 0 and 32. */ - rustsecp256k1_v0_9_2_fe_verify_magnitude(a, 32); + SECP256K1_FE_VERIFY_MAGNITUDE(a, 32); /* Normalized is 0 or 1. */ VERIFY_CHECK((a->normalized == 0) || (a->normalized == 1)); /* If normalized, magnitude must be 0 or 1. */ - if (a->normalized) rustsecp256k1_v0_9_2_fe_verify_magnitude(a, 1); + if (a->normalized) SECP256K1_FE_VERIFY_MAGNITUDE(a, 1); /* Invoke implementation-specific checks. 
*/ - rustsecp256k1_v0_9_2_fe_impl_verify(a); + rustsecp256k1_v0_10_0_fe_impl_verify(a); } -static void rustsecp256k1_v0_9_2_fe_verify_magnitude(const rustsecp256k1_v0_9_2_fe *a, int m) { +static void rustsecp256k1_v0_10_0_fe_verify_magnitude(const rustsecp256k1_v0_10_0_fe *a, int m) { VERIFY_CHECK(m >= 0); VERIFY_CHECK(m <= 32); VERIFY_CHECK(a->magnitude <= m); } -static void rustsecp256k1_v0_9_2_fe_impl_normalize(rustsecp256k1_v0_9_2_fe *r); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_normalize(rustsecp256k1_v0_9_2_fe *r) { - rustsecp256k1_v0_9_2_fe_verify(r); - rustsecp256k1_v0_9_2_fe_impl_normalize(r); +static void rustsecp256k1_v0_10_0_fe_impl_normalize(rustsecp256k1_v0_10_0_fe *r); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_normalize(rustsecp256k1_v0_10_0_fe *r) { + SECP256K1_FE_VERIFY(r); + + rustsecp256k1_v0_10_0_fe_impl_normalize(r); r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_fe_impl_normalize_weak(rustsecp256k1_v0_9_2_fe *r); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_normalize_weak(rustsecp256k1_v0_9_2_fe *r) { - rustsecp256k1_v0_9_2_fe_verify(r); - rustsecp256k1_v0_9_2_fe_impl_normalize_weak(r); +static void rustsecp256k1_v0_10_0_fe_impl_normalize_weak(rustsecp256k1_v0_10_0_fe *r); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_normalize_weak(rustsecp256k1_v0_10_0_fe *r) { + SECP256K1_FE_VERIFY(r); + + rustsecp256k1_v0_10_0_fe_impl_normalize_weak(r); r->magnitude = 1; - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_fe_impl_normalize_var(rustsecp256k1_v0_9_2_fe *r); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_normalize_var(rustsecp256k1_v0_9_2_fe *r) { - rustsecp256k1_v0_9_2_fe_verify(r); - rustsecp256k1_v0_9_2_fe_impl_normalize_var(r); +static void rustsecp256k1_v0_10_0_fe_impl_normalize_var(rustsecp256k1_v0_10_0_fe *r); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_normalize_var(rustsecp256k1_v0_10_0_fe *r) { + SECP256K1_FE_VERIFY(r); + + rustsecp256k1_v0_10_0_fe_impl_normalize_var(r); r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } -static int rustsecp256k1_v0_9_2_fe_impl_normalizes_to_zero(const rustsecp256k1_v0_9_2_fe *r); -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_fe_normalizes_to_zero(const rustsecp256k1_v0_9_2_fe *r) { - rustsecp256k1_v0_9_2_fe_verify(r); - return rustsecp256k1_v0_9_2_fe_impl_normalizes_to_zero(r); +static int rustsecp256k1_v0_10_0_fe_impl_normalizes_to_zero(const rustsecp256k1_v0_10_0_fe *r); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_fe_normalizes_to_zero(const rustsecp256k1_v0_10_0_fe *r) { + SECP256K1_FE_VERIFY(r); + + return rustsecp256k1_v0_10_0_fe_impl_normalizes_to_zero(r); } -static int rustsecp256k1_v0_9_2_fe_impl_normalizes_to_zero_var(const rustsecp256k1_v0_9_2_fe *r); -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(const rustsecp256k1_v0_9_2_fe *r) { - rustsecp256k1_v0_9_2_fe_verify(r); - return rustsecp256k1_v0_9_2_fe_impl_normalizes_to_zero_var(r); +static int rustsecp256k1_v0_10_0_fe_impl_normalizes_to_zero_var(const rustsecp256k1_v0_10_0_fe *r); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(const rustsecp256k1_v0_10_0_fe *r) { + SECP256K1_FE_VERIFY(r); + + return rustsecp256k1_v0_10_0_fe_impl_normalizes_to_zero_var(r); } -static void rustsecp256k1_v0_9_2_fe_impl_set_int(rustsecp256k1_v0_9_2_fe *r, int a); 
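The rest of this hunk repeats one mechanical pattern: each public field-element routine verifies its inputs, dispatches to the _impl_ backend, then records what the operation guarantees about the output's magnitude/normalized flags (which exist only under VERIFY, which is also why the non-VERIFY branch above reduces fe_verify to a no-op). A stripped-down sketch of that shape; fe_t, fe_negate and friends are invented names, not the vendored types:

    #include <assert.h>

    #ifdef VERIFY
    #define CHECK(cond) assert(cond)
    #else
    #define CHECK(cond) do { } while (0)
    #endif

    typedef struct {
        unsigned long long n[5];  /* stand-in for the 5x52 limbs */
    #ifdef VERIFY
        int magnitude;            /* how far limbs may exceed canonical bounds */
        int normalized;           /* fully reduced representative? */
    #endif
    } fe_t;

    static void fe_impl_negate(fe_t *r, const fe_t *a, int m) {
        (void)a; (void)m; r->n[0] = 0;  /* real limb arithmetic elided */
    }

    /* Wrapper: check preconditions, do the work, record postconditions. */
    static void fe_negate(fe_t *r, const fe_t *a, int m) {
        CHECK(m >= 0 && m <= 31);
        fe_impl_negate(r, a, m);
    #ifdef VERIFY
        r->magnitude = m + 1;  /* negating magnitude m yields magnitude m+1 */
        r->normalized = 0;
    #endif
    }

    int main(void) {
        fe_t a = {{0}}, r;
        fe_negate(&r, &a, 1);
        return 0;
    }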
-SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_set_int(rustsecp256k1_v0_9_2_fe *r, int a) { +static void rustsecp256k1_v0_10_0_fe_impl_set_int(rustsecp256k1_v0_10_0_fe *r, int a); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_set_int(rustsecp256k1_v0_10_0_fe *r, int a) { VERIFY_CHECK(0 <= a && a <= 0x7FFF); - rustsecp256k1_v0_9_2_fe_impl_set_int(r, a); + + rustsecp256k1_v0_10_0_fe_impl_set_int(r, a); r->magnitude = (a != 0); r->normalized = 1; - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_fe_impl_add_int(rustsecp256k1_v0_9_2_fe *r, int a); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_add_int(rustsecp256k1_v0_9_2_fe *r, int a) { +static void rustsecp256k1_v0_10_0_fe_impl_add_int(rustsecp256k1_v0_10_0_fe *r, int a); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_add_int(rustsecp256k1_v0_10_0_fe *r, int a) { VERIFY_CHECK(0 <= a && a <= 0x7FFF); - rustsecp256k1_v0_9_2_fe_verify(r); - rustsecp256k1_v0_9_2_fe_impl_add_int(r, a); + SECP256K1_FE_VERIFY(r); + + rustsecp256k1_v0_10_0_fe_impl_add_int(r, a); r->magnitude += 1; r->normalized = 0; - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_fe_impl_clear(rustsecp256k1_v0_9_2_fe *a); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_clear(rustsecp256k1_v0_9_2_fe *a) { +static void rustsecp256k1_v0_10_0_fe_impl_clear(rustsecp256k1_v0_10_0_fe *a); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_clear(rustsecp256k1_v0_10_0_fe *a) { a->magnitude = 0; a->normalized = 1; - rustsecp256k1_v0_9_2_fe_impl_clear(a); - rustsecp256k1_v0_9_2_fe_verify(a); + rustsecp256k1_v0_10_0_fe_impl_clear(a); + + SECP256K1_FE_VERIFY(a); } -static int rustsecp256k1_v0_9_2_fe_impl_is_zero(const rustsecp256k1_v0_9_2_fe *a); -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_fe_is_zero(const rustsecp256k1_v0_9_2_fe *a) { - rustsecp256k1_v0_9_2_fe_verify(a); +static int rustsecp256k1_v0_10_0_fe_impl_is_zero(const rustsecp256k1_v0_10_0_fe *a); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_fe_is_zero(const rustsecp256k1_v0_10_0_fe *a) { + SECP256K1_FE_VERIFY(a); VERIFY_CHECK(a->normalized); - return rustsecp256k1_v0_9_2_fe_impl_is_zero(a); + + return rustsecp256k1_v0_10_0_fe_impl_is_zero(a); } -static int rustsecp256k1_v0_9_2_fe_impl_is_odd(const rustsecp256k1_v0_9_2_fe *a); -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_fe_is_odd(const rustsecp256k1_v0_9_2_fe *a) { - rustsecp256k1_v0_9_2_fe_verify(a); +static int rustsecp256k1_v0_10_0_fe_impl_is_odd(const rustsecp256k1_v0_10_0_fe *a); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_fe_is_odd(const rustsecp256k1_v0_10_0_fe *a) { + SECP256K1_FE_VERIFY(a); VERIFY_CHECK(a->normalized); - return rustsecp256k1_v0_9_2_fe_impl_is_odd(a); + + return rustsecp256k1_v0_10_0_fe_impl_is_odd(a); } -static int rustsecp256k1_v0_9_2_fe_impl_cmp_var(const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe *b); -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_fe_cmp_var(const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe *b) { - rustsecp256k1_v0_9_2_fe_verify(a); - rustsecp256k1_v0_9_2_fe_verify(b); +static int rustsecp256k1_v0_10_0_fe_impl_cmp_var(const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe *b); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_fe_cmp_var(const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe *b) { + SECP256K1_FE_VERIFY(a); + SECP256K1_FE_VERIFY(b); VERIFY_CHECK(a->normalized); VERIFY_CHECK(b->normalized); - return 
rustsecp256k1_v0_9_2_fe_impl_cmp_var(a, b); + + return rustsecp256k1_v0_10_0_fe_impl_cmp_var(a, b); } -static void rustsecp256k1_v0_9_2_fe_impl_set_b32_mod(rustsecp256k1_v0_9_2_fe *r, const unsigned char *a); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_set_b32_mod(rustsecp256k1_v0_9_2_fe *r, const unsigned char *a) { - rustsecp256k1_v0_9_2_fe_impl_set_b32_mod(r, a); +static void rustsecp256k1_v0_10_0_fe_impl_set_b32_mod(rustsecp256k1_v0_10_0_fe *r, const unsigned char *a); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_set_b32_mod(rustsecp256k1_v0_10_0_fe *r, const unsigned char *a) { + rustsecp256k1_v0_10_0_fe_impl_set_b32_mod(r, a); r->magnitude = 1; r->normalized = 0; - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } -static int rustsecp256k1_v0_9_2_fe_impl_set_b32_limit(rustsecp256k1_v0_9_2_fe *r, const unsigned char *a); -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_fe_set_b32_limit(rustsecp256k1_v0_9_2_fe *r, const unsigned char *a) { - if (rustsecp256k1_v0_9_2_fe_impl_set_b32_limit(r, a)) { +static int rustsecp256k1_v0_10_0_fe_impl_set_b32_limit(rustsecp256k1_v0_10_0_fe *r, const unsigned char *a); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_fe_set_b32_limit(rustsecp256k1_v0_10_0_fe *r, const unsigned char *a) { + if (rustsecp256k1_v0_10_0_fe_impl_set_b32_limit(r, a)) { r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_9_2_fe_verify(r); + SECP256K1_FE_VERIFY(r); return 1; } else { /* Mark the output field element as invalid. */ @@ -276,147 +290,171 @@ SECP256K1_INLINE static int rustsecp256k1_v0_9_2_fe_set_b32_limit(rustsecp256k1_ } } -static void rustsecp256k1_v0_9_2_fe_impl_get_b32(unsigned char *r, const rustsecp256k1_v0_9_2_fe *a); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_9_2_fe *a) { - rustsecp256k1_v0_9_2_fe_verify(a); +static void rustsecp256k1_v0_10_0_fe_impl_get_b32(unsigned char *r, const rustsecp256k1_v0_10_0_fe *a); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_10_0_fe *a) { + SECP256K1_FE_VERIFY(a); VERIFY_CHECK(a->normalized); - rustsecp256k1_v0_9_2_fe_impl_get_b32(r, a); + + rustsecp256k1_v0_10_0_fe_impl_get_b32(r, a); } -static void rustsecp256k1_v0_9_2_fe_impl_negate_unchecked(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, int m); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_negate_unchecked(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, int m) { - rustsecp256k1_v0_9_2_fe_verify(a); +static void rustsecp256k1_v0_10_0_fe_impl_negate_unchecked(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, int m); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_negate_unchecked(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, int m) { + SECP256K1_FE_VERIFY(a); VERIFY_CHECK(m >= 0 && m <= 31); - rustsecp256k1_v0_9_2_fe_verify_magnitude(a, m); - rustsecp256k1_v0_9_2_fe_impl_negate_unchecked(r, a, m); + SECP256K1_FE_VERIFY_MAGNITUDE(a, m); + + rustsecp256k1_v0_10_0_fe_impl_negate_unchecked(r, a, m); r->magnitude = m + 1; r->normalized = 0; - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_fe_impl_mul_int_unchecked(rustsecp256k1_v0_9_2_fe *r, int a); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_mul_int_unchecked(rustsecp256k1_v0_9_2_fe *r, int a) { - rustsecp256k1_v0_9_2_fe_verify(r); +static void rustsecp256k1_v0_10_0_fe_impl_mul_int_unchecked(rustsecp256k1_v0_10_0_fe *r, int a); 
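The magnitude updates threaded through these wrappers follow a small algebra: add sums the operands' magnitudes, mul_int scales by the integer factor, negating magnitude m produces m+1, and full mul/sqr output is always magnitude 1, with every intermediate capped at 32 so the 52-bit limbs cannot overflow. A hypothetical checker that makes the rules explicit (not library code):

    #include <assert.h>

    #define MAG_MAX 32  /* headroom limit enforced by the VERIFY_CHECKs above */

    static int mag_add(int a, int b)     { assert(a + b <= MAG_MAX); return a + b; }
    static int mag_mul_int(int m, int k) { assert(k >= 0 && k <= 32 && m * k <= MAG_MAX); return m * k; }
    static int mag_negate(int m)         { assert(m <= 31); return m + 1; }
    static int mag_mul(int a, int b)     { assert(a <= 8 && b <= 8); return 1; }

    int main(void) {
        int m = 1;
        m = mag_add(m, 1);      /* fe_add:     1 + 1 -> 2 */
        m = mag_mul_int(m, 3);  /* fe_mul_int: 2 * 3 -> 6 */
        m = mag_negate(m);      /* fe_negate:  6 -> 7 */
        m = mag_mul(m, 8);      /* fe_mul output is always magnitude 1 */
        assert(m == 1);
        return 0;
    }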
+SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_mul_int_unchecked(rustsecp256k1_v0_10_0_fe *r, int a) { + SECP256K1_FE_VERIFY(r); + VERIFY_CHECK(a >= 0 && a <= 32); VERIFY_CHECK(a*r->magnitude <= 32); - rustsecp256k1_v0_9_2_fe_impl_mul_int_unchecked(r, a); + rustsecp256k1_v0_10_0_fe_impl_mul_int_unchecked(r, a); r->magnitude *= a; r->normalized = 0; - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_fe_impl_add(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_add(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a) { - rustsecp256k1_v0_9_2_fe_verify(r); - rustsecp256k1_v0_9_2_fe_verify(a); +static void rustsecp256k1_v0_10_0_fe_impl_add(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_add(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a) { + SECP256K1_FE_VERIFY(r); + SECP256K1_FE_VERIFY(a); VERIFY_CHECK(r->magnitude + a->magnitude <= 32); - rustsecp256k1_v0_9_2_fe_impl_add(r, a); + + rustsecp256k1_v0_10_0_fe_impl_add(r, a); r->magnitude += a->magnitude; r->normalized = 0; - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_fe_impl_mul(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe * SECP256K1_RESTRICT b); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_mul(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe * SECP256K1_RESTRICT b) { - rustsecp256k1_v0_9_2_fe_verify(a); - rustsecp256k1_v0_9_2_fe_verify(b); - rustsecp256k1_v0_9_2_fe_verify_magnitude(a, 8); - rustsecp256k1_v0_9_2_fe_verify_magnitude(b, 8); +static void rustsecp256k1_v0_10_0_fe_impl_mul(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe * SECP256K1_RESTRICT b); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_mul(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe * SECP256K1_RESTRICT b) { + SECP256K1_FE_VERIFY(a); + SECP256K1_FE_VERIFY(b); + SECP256K1_FE_VERIFY_MAGNITUDE(a, 8); + SECP256K1_FE_VERIFY_MAGNITUDE(b, 8); VERIFY_CHECK(r != b); VERIFY_CHECK(a != b); - rustsecp256k1_v0_9_2_fe_impl_mul(r, a, b); + + rustsecp256k1_v0_10_0_fe_impl_mul(r, a, b); r->magnitude = 1; r->normalized = 0; - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_fe_impl_sqr(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_sqr(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a) { - rustsecp256k1_v0_9_2_fe_verify(a); - rustsecp256k1_v0_9_2_fe_verify_magnitude(a, 8); - rustsecp256k1_v0_9_2_fe_impl_sqr(r, a); +static void rustsecp256k1_v0_10_0_fe_impl_sqr(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_sqr(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a) { + SECP256K1_FE_VERIFY(a); + SECP256K1_FE_VERIFY_MAGNITUDE(a, 8); + + rustsecp256k1_v0_10_0_fe_impl_sqr(r, a); r->magnitude = 1; r->normalized = 0; - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_fe_impl_cmov(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, int flag); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_cmov(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *a, 
int flag) { +static void rustsecp256k1_v0_10_0_fe_impl_cmov(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, int flag); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_cmov(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *a, int flag) { VERIFY_CHECK(flag == 0 || flag == 1); - rustsecp256k1_v0_9_2_fe_verify(a); - rustsecp256k1_v0_9_2_fe_verify(r); - rustsecp256k1_v0_9_2_fe_impl_cmov(r, a, flag); + SECP256K1_FE_VERIFY(a); + SECP256K1_FE_VERIFY(r); + + rustsecp256k1_v0_10_0_fe_impl_cmov(r, a, flag); if (a->magnitude > r->magnitude) r->magnitude = a->magnitude; if (!a->normalized) r->normalized = 0; - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_fe_impl_to_storage(rustsecp256k1_v0_9_2_fe_storage *r, const rustsecp256k1_v0_9_2_fe *a); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_to_storage(rustsecp256k1_v0_9_2_fe_storage *r, const rustsecp256k1_v0_9_2_fe *a) { - rustsecp256k1_v0_9_2_fe_verify(a); +static void rustsecp256k1_v0_10_0_fe_impl_to_storage(rustsecp256k1_v0_10_0_fe_storage *r, const rustsecp256k1_v0_10_0_fe *a); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_to_storage(rustsecp256k1_v0_10_0_fe_storage *r, const rustsecp256k1_v0_10_0_fe *a) { + SECP256K1_FE_VERIFY(a); VERIFY_CHECK(a->normalized); - rustsecp256k1_v0_9_2_fe_impl_to_storage(r, a); + + rustsecp256k1_v0_10_0_fe_impl_to_storage(r, a); } -static void rustsecp256k1_v0_9_2_fe_impl_from_storage(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe_storage *a); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_from_storage(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe_storage *a) { - rustsecp256k1_v0_9_2_fe_impl_from_storage(r, a); +static void rustsecp256k1_v0_10_0_fe_impl_from_storage(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe_storage *a); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_from_storage(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe_storage *a) { + rustsecp256k1_v0_10_0_fe_impl_from_storage(r, a); r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_fe_impl_inv(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *x); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_inv(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *x) { - int input_is_zero = rustsecp256k1_v0_9_2_fe_normalizes_to_zero(x); - rustsecp256k1_v0_9_2_fe_verify(x); - rustsecp256k1_v0_9_2_fe_impl_inv(r, x); +static void rustsecp256k1_v0_10_0_fe_impl_inv(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *x); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_inv(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *x) { + int input_is_zero = rustsecp256k1_v0_10_0_fe_normalizes_to_zero(x); + SECP256K1_FE_VERIFY(x); + + rustsecp256k1_v0_10_0_fe_impl_inv(r, x); r->magnitude = x->magnitude > 0; r->normalized = 1; - VERIFY_CHECK(rustsecp256k1_v0_9_2_fe_normalizes_to_zero(r) == input_is_zero); - rustsecp256k1_v0_9_2_fe_verify(r); + + VERIFY_CHECK(rustsecp256k1_v0_10_0_fe_normalizes_to_zero(r) == input_is_zero); + SECP256K1_FE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_fe_impl_inv_var(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *x); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_inv_var(rustsecp256k1_v0_9_2_fe *r, const rustsecp256k1_v0_9_2_fe *x) { - int input_is_zero = rustsecp256k1_v0_9_2_fe_normalizes_to_zero(x); - rustsecp256k1_v0_9_2_fe_verify(x); - 
rustsecp256k1_v0_9_2_fe_impl_inv_var(r, x); +static void rustsecp256k1_v0_10_0_fe_impl_inv_var(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *x); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_inv_var(rustsecp256k1_v0_10_0_fe *r, const rustsecp256k1_v0_10_0_fe *x) { + int input_is_zero = rustsecp256k1_v0_10_0_fe_normalizes_to_zero(x); + SECP256K1_FE_VERIFY(x); + + rustsecp256k1_v0_10_0_fe_impl_inv_var(r, x); r->magnitude = x->magnitude > 0; r->normalized = 1; - VERIFY_CHECK(rustsecp256k1_v0_9_2_fe_normalizes_to_zero(r) == input_is_zero); - rustsecp256k1_v0_9_2_fe_verify(r); + + VERIFY_CHECK(rustsecp256k1_v0_10_0_fe_normalizes_to_zero(r) == input_is_zero); + SECP256K1_FE_VERIFY(r); } -static int rustsecp256k1_v0_9_2_fe_impl_is_square_var(const rustsecp256k1_v0_9_2_fe *x); -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_fe_is_square_var(const rustsecp256k1_v0_9_2_fe *x) { +static int rustsecp256k1_v0_10_0_fe_impl_is_square_var(const rustsecp256k1_v0_10_0_fe *x); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_fe_is_square_var(const rustsecp256k1_v0_10_0_fe *x) { int ret; - rustsecp256k1_v0_9_2_fe tmp = *x, sqrt; - rustsecp256k1_v0_9_2_fe_verify(x); - ret = rustsecp256k1_v0_9_2_fe_impl_is_square_var(x); - rustsecp256k1_v0_9_2_fe_normalize_weak(&tmp); - VERIFY_CHECK(ret == rustsecp256k1_v0_9_2_fe_sqrt(&sqrt, &tmp)); + rustsecp256k1_v0_10_0_fe tmp = *x, sqrt; + SECP256K1_FE_VERIFY(x); + + ret = rustsecp256k1_v0_10_0_fe_impl_is_square_var(x); + rustsecp256k1_v0_10_0_fe_normalize_weak(&tmp); + VERIFY_CHECK(ret == rustsecp256k1_v0_10_0_fe_sqrt(&sqrt, &tmp)); return ret; } -static void rustsecp256k1_v0_9_2_fe_impl_get_bounds(rustsecp256k1_v0_9_2_fe* r, int m); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_get_bounds(rustsecp256k1_v0_9_2_fe* r, int m) { +static void rustsecp256k1_v0_10_0_fe_impl_get_bounds(rustsecp256k1_v0_10_0_fe* r, int m); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_get_bounds(rustsecp256k1_v0_10_0_fe* r, int m) { VERIFY_CHECK(m >= 0); VERIFY_CHECK(m <= 32); - rustsecp256k1_v0_9_2_fe_impl_get_bounds(r, m); + + rustsecp256k1_v0_10_0_fe_impl_get_bounds(r, m); r->magnitude = m; r->normalized = (m == 0); - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_fe_impl_half(rustsecp256k1_v0_9_2_fe *r); -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_fe_half(rustsecp256k1_v0_9_2_fe *r) { - rustsecp256k1_v0_9_2_fe_verify(r); - rustsecp256k1_v0_9_2_fe_verify_magnitude(r, 31); - rustsecp256k1_v0_9_2_fe_impl_half(r); +static void rustsecp256k1_v0_10_0_fe_impl_half(rustsecp256k1_v0_10_0_fe *r); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_fe_half(rustsecp256k1_v0_10_0_fe *r) { + SECP256K1_FE_VERIFY(r); + SECP256K1_FE_VERIFY_MAGNITUDE(r, 31); + + rustsecp256k1_v0_10_0_fe_impl_half(r); r->magnitude = (r->magnitude >> 1) + 1; r->normalized = 0; - rustsecp256k1_v0_9_2_fe_verify(r); + + SECP256K1_FE_VERIFY(r); } #endif /* defined(VERIFY) */ diff --git a/secp256k1-sys/depend/secp256k1/src/group.h b/secp256k1-sys/depend/secp256k1/src/group.h index 6f1f7ab37..db2c57747 100644 --- a/secp256k1-sys/depend/secp256k1/src/group.h +++ b/secp256k1-sys/depend/secp256k1/src/group.h @@ -14,10 +14,10 @@ * Note: For exhaustive test mode, secp256k1 is replaced by a small subgroup of a different curve. 
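Ahead of the group typedefs that follow: a Jacobian triple (X, Y, Z) stands for the affine point (X/Z^2, Y/Z^3), exactly as the comments on the gej struct say, which is what lets point arithmetic defer expensive inversions. A toy conversion over a small prime using Fermat inversion (z^(p-2)); jac_to_affine and powmod are invented names:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t powmod(uint64_t b, uint64_t e, uint64_t m) {
        unsigned __int128 acc = 1, base = b % m;
        while (e) {
            if (e & 1) acc = acc * base % m;
            base = base * base % m;
            e >>= 1;
        }
        return (uint64_t)acc;
    }

    /* Affine coordinates of Jacobian (X, Y, Z): x = X/Z^2, y = Y/Z^3 (mod p). */
    static void jac_to_affine(uint64_t *x, uint64_t *y,
                              uint64_t X, uint64_t Y, uint64_t Z, uint64_t p) {
        uint64_t zi  = powmod(Z, p - 2, p);             /* 1/Z by Fermat */
        uint64_t zi2 = (uint64_t)((unsigned __int128)zi * zi % p);
        uint64_t zi3 = (uint64_t)((unsigned __int128)zi2 * zi % p);
        *x = (uint64_t)((unsigned __int128)X * zi2 % p);
        *y = (uint64_t)((unsigned __int128)Y * zi3 % p);
    }

    int main(void) {
        /* Lift the affine point (3, 5) by Z = 2 over p = 23, then recover it. */
        uint64_t p = 23, Z = 2;
        uint64_t X = 3 * Z % p * Z % p;          /* X = x*Z^2 = 12 */
        uint64_t Y = 5 * Z % p * Z % p * Z % p;  /* Y = y*Z^3 = 17 */
        uint64_t x, y;
        jac_to_affine(&x, &y, X, Y, Z, p);
        assert(x == 3 && y == 5);
        return 0;
    }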
*/ typedef struct { - rustsecp256k1_v0_9_2_fe x; - rustsecp256k1_v0_9_2_fe y; + rustsecp256k1_v0_10_0_fe x; + rustsecp256k1_v0_10_0_fe y; int infinity; /* whether this represents the point at infinity */ -} rustsecp256k1_v0_9_2_ge; +} rustsecp256k1_v0_10_0_ge; #define SECP256K1_GE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), 0} #define SECP256K1_GE_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1} @@ -26,19 +26,19 @@ typedef struct { * Note: For exhaustive test mode, secp256k1 is replaced by a small subgroup of a different curve. */ typedef struct { - rustsecp256k1_v0_9_2_fe x; /* actual X: x/z^2 */ - rustsecp256k1_v0_9_2_fe y; /* actual Y: y/z^3 */ - rustsecp256k1_v0_9_2_fe z; + rustsecp256k1_v0_10_0_fe x; /* actual X: x/z^2 */ + rustsecp256k1_v0_10_0_fe y; /* actual Y: y/z^3 */ + rustsecp256k1_v0_10_0_fe z; int infinity; /* whether this represents the point at infinity */ -} rustsecp256k1_v0_9_2_gej; +} rustsecp256k1_v0_10_0_gej; #define SECP256K1_GEJ_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), 0} #define SECP256K1_GEJ_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1} typedef struct { - rustsecp256k1_v0_9_2_fe_storage x; - rustsecp256k1_v0_9_2_fe_storage y; -} rustsecp256k1_v0_9_2_ge_storage; + rustsecp256k1_v0_10_0_fe_storage x; + rustsecp256k1_v0_10_0_fe_storage y; +} rustsecp256k1_v0_10_0_ge_storage; #define SECP256K1_GE_STORAGE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_STORAGE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_STORAGE_CONST((i),(j),(k),(l),(m),(n),(o),(p))} @@ -53,35 +53,35 @@ typedef struct { #define SECP256K1_GEJ_Z_MAGNITUDE_MAX 1 /** Set a group element equal to the point with given X and Y coordinates */ -static void rustsecp256k1_v0_9_2_ge_set_xy(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_fe *x, const rustsecp256k1_v0_9_2_fe *y); +static void rustsecp256k1_v0_10_0_ge_set_xy(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_fe *x, const rustsecp256k1_v0_10_0_fe *y); /** Set a group element (affine) equal to the point with the given X coordinate, and given oddness * for Y. Return value indicates whether the result is valid. */ -static int rustsecp256k1_v0_9_2_ge_set_xo_var(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_fe *x, int odd); +static int rustsecp256k1_v0_10_0_ge_set_xo_var(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_fe *x, int odd); /** Determine whether x is a valid X coordinate on the curve. */ -static int rustsecp256k1_v0_9_2_ge_x_on_curve_var(const rustsecp256k1_v0_9_2_fe *x); +static int rustsecp256k1_v0_10_0_ge_x_on_curve_var(const rustsecp256k1_v0_10_0_fe *x); /** Determine whether fraction xn/xd is a valid X coordinate on the curve (xd != 0). */ -static int rustsecp256k1_v0_9_2_ge_x_frac_on_curve_var(const rustsecp256k1_v0_9_2_fe *xn, const rustsecp256k1_v0_9_2_fe *xd); +static int rustsecp256k1_v0_10_0_ge_x_frac_on_curve_var(const rustsecp256k1_v0_10_0_fe *xn, const rustsecp256k1_v0_10_0_fe *xd); /** Check whether a group element is the point at infinity. 
*/ -static int rustsecp256k1_v0_9_2_ge_is_infinity(const rustsecp256k1_v0_9_2_ge *a); +static int rustsecp256k1_v0_10_0_ge_is_infinity(const rustsecp256k1_v0_10_0_ge *a); /** Check whether a group element is valid (i.e., on the curve). */ -static int rustsecp256k1_v0_9_2_ge_is_valid_var(const rustsecp256k1_v0_9_2_ge *a); +static int rustsecp256k1_v0_10_0_ge_is_valid_var(const rustsecp256k1_v0_10_0_ge *a); /** Set r equal to the inverse of a (i.e., mirrored around the X axis) */ -static void rustsecp256k1_v0_9_2_ge_neg(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_ge *a); +static void rustsecp256k1_v0_10_0_ge_neg(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_ge *a); /** Set a group element equal to another which is given in jacobian coordinates. Constant time. */ -static void rustsecp256k1_v0_9_2_ge_set_gej(rustsecp256k1_v0_9_2_ge *r, rustsecp256k1_v0_9_2_gej *a); +static void rustsecp256k1_v0_10_0_ge_set_gej(rustsecp256k1_v0_10_0_ge *r, rustsecp256k1_v0_10_0_gej *a); /** Set a group element equal to another which is given in jacobian coordinates. */ -static void rustsecp256k1_v0_9_2_ge_set_gej_var(rustsecp256k1_v0_9_2_ge *r, rustsecp256k1_v0_9_2_gej *a); +static void rustsecp256k1_v0_10_0_ge_set_gej_var(rustsecp256k1_v0_10_0_ge *r, rustsecp256k1_v0_10_0_gej *a); /** Set a batch of group elements equal to the inputs given in jacobian coordinates */ -static void rustsecp256k1_v0_9_2_ge_set_all_gej_var(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_gej *a, size_t len); +static void rustsecp256k1_v0_10_0_ge_set_all_gej_var(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_gej *a, size_t len); /** Bring a batch of inputs to the same global z "denominator", based on ratios between * (omitted) z coordinates of adjacent elements. @@ -100,73 +100,79 @@ static void rustsecp256k1_v0_9_2_ge_set_all_gej_var(rustsecp256k1_v0_9_2_ge *r, * * The coordinates of the final element a[len-1] are not changed. */ -static void rustsecp256k1_v0_9_2_ge_table_set_globalz(size_t len, rustsecp256k1_v0_9_2_ge *a, const rustsecp256k1_v0_9_2_fe *zr); +static void rustsecp256k1_v0_10_0_ge_table_set_globalz(size_t len, rustsecp256k1_v0_10_0_ge *a, const rustsecp256k1_v0_10_0_fe *zr); + +/** Check two group elements (affine) for equality in variable time. */ +static int rustsecp256k1_v0_10_0_ge_eq_var(const rustsecp256k1_v0_10_0_ge *a, const rustsecp256k1_v0_10_0_ge *b); /** Set a group element (affine) equal to the point at infinity. */ -static void rustsecp256k1_v0_9_2_ge_set_infinity(rustsecp256k1_v0_9_2_ge *r); +static void rustsecp256k1_v0_10_0_ge_set_infinity(rustsecp256k1_v0_10_0_ge *r); /** Set a group element (jacobian) equal to the point at infinity. */ -static void rustsecp256k1_v0_9_2_gej_set_infinity(rustsecp256k1_v0_9_2_gej *r); +static void rustsecp256k1_v0_10_0_gej_set_infinity(rustsecp256k1_v0_10_0_gej *r); /** Set a group element (jacobian) equal to another which is given in affine coordinates. */ -static void rustsecp256k1_v0_9_2_gej_set_ge(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_ge *a); +static void rustsecp256k1_v0_10_0_gej_set_ge(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_ge *a); /** Check two group elements (jacobian) for equality in variable time. 
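On the equality-test declarations here: two Jacobian points can be compared without any inversion, since (X1, Y1, Z1) and (X2, Y2, Z2) denote the same affine point iff X1*Z2^2 == X2*Z1^2 and Y1*Z2^3 == Y2*Z1^3. A variable-time toy version over a small prime (hypothetical names; infinity handling omitted):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t p) {
        return (uint64_t)((unsigned __int128)a * b % p);
    }

    /* Same affine point iff X1*Z2^2 == X2*Z1^2 and Y1*Z2^3 == Y2*Z1^3 (mod p). */
    static int jac_eq(uint64_t X1, uint64_t Y1, uint64_t Z1,
                      uint64_t X2, uint64_t Y2, uint64_t Z2, uint64_t p) {
        uint64_t z1s = mulmod(Z1, Z1, p), z2s = mulmod(Z2, Z2, p);
        uint64_t z1c = mulmod(z1s, Z1, p), z2c = mulmod(z2s, Z2, p);
        return mulmod(X1, z2s, p) == mulmod(X2, z1s, p)
            && mulmod(Y1, z2c, p) == mulmod(Y2, z1c, p);
    }

    int main(void) {
        /* (12, 17, 2) is (3, 5) lifted by Z = 2: 12 == 3*4, 17 == 5*8 (mod 23). */
        assert(jac_eq(3, 5, 1, 12, 17, 2, 23));
        assert(!jac_eq(3, 6, 1, 12, 17, 2, 23));
        return 0;
    }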
*/ -static int rustsecp256k1_v0_9_2_gej_eq_var(const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_gej *b); +static int rustsecp256k1_v0_10_0_gej_eq_var(const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_gej *b); + +/** Check two group elements (jacobian and affine) for equality in variable time. */ +static int rustsecp256k1_v0_10_0_gej_eq_ge_var(const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_ge *b); /** Compare the X coordinate of a group element (jacobian). * The magnitude of the group element's X coordinate must not exceed 31. */ -static int rustsecp256k1_v0_9_2_gej_eq_x_var(const rustsecp256k1_v0_9_2_fe *x, const rustsecp256k1_v0_9_2_gej *a); +static int rustsecp256k1_v0_10_0_gej_eq_x_var(const rustsecp256k1_v0_10_0_fe *x, const rustsecp256k1_v0_10_0_gej *a); /** Set r equal to the inverse of a (i.e., mirrored around the X axis) */ -static void rustsecp256k1_v0_9_2_gej_neg(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a); +static void rustsecp256k1_v0_10_0_gej_neg(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a); /** Check whether a group element is the point at infinity. */ -static int rustsecp256k1_v0_9_2_gej_is_infinity(const rustsecp256k1_v0_9_2_gej *a); +static int rustsecp256k1_v0_10_0_gej_is_infinity(const rustsecp256k1_v0_10_0_gej *a); /** Set r equal to the double of a. Constant time. */ -static void rustsecp256k1_v0_9_2_gej_double(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a); +static void rustsecp256k1_v0_10_0_gej_double(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a); /** Set r equal to the double of a. If rzr is not-NULL this sets *rzr such that r->z == a->z * *rzr (where infinity means an implicit z = 0). */ -static void rustsecp256k1_v0_9_2_gej_double_var(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a, rustsecp256k1_v0_9_2_fe *rzr); +static void rustsecp256k1_v0_10_0_gej_double_var(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a, rustsecp256k1_v0_10_0_fe *rzr); /** Set r equal to the sum of a and b. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (a cannot be infinity in that case). */ -static void rustsecp256k1_v0_9_2_gej_add_var(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_gej *b, rustsecp256k1_v0_9_2_fe *rzr); +static void rustsecp256k1_v0_10_0_gej_add_var(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_gej *b, rustsecp256k1_v0_10_0_fe *rzr); /** Set r equal to the sum of a and b (with b given in affine coordinates, and not infinity). */ -static void rustsecp256k1_v0_9_2_gej_add_ge(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_ge *b); +static void rustsecp256k1_v0_10_0_gej_add_ge(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_ge *b); /** Set r equal to the sum of a and b (with b given in affine coordinates). This is more efficient - than rustsecp256k1_v0_9_2_gej_add_var. It is identical to rustsecp256k1_v0_9_2_gej_add_ge but without constant-time + than rustsecp256k1_v0_10_0_gej_add_var. It is identical to rustsecp256k1_v0_10_0_gej_add_ge but without constant-time guarantee, and b is allowed to be infinity. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (a cannot be infinity in that case). 
*/ -static void rustsecp256k1_v0_9_2_gej_add_ge_var(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_ge *b, rustsecp256k1_v0_9_2_fe *rzr); +static void rustsecp256k1_v0_10_0_gej_add_ge_var(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_ge *b, rustsecp256k1_v0_10_0_fe *rzr); /** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). */ -static void rustsecp256k1_v0_9_2_gej_add_zinv_var(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_ge *b, const rustsecp256k1_v0_9_2_fe *bzinv); +static void rustsecp256k1_v0_10_0_gej_add_zinv_var(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_ge *b, const rustsecp256k1_v0_10_0_fe *bzinv); /** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */ -static void rustsecp256k1_v0_9_2_ge_mul_lambda(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_ge *a); +static void rustsecp256k1_v0_10_0_ge_mul_lambda(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_ge *a); -/** Clear a rustsecp256k1_v0_9_2_gej to prevent leaking sensitive information. */ -static void rustsecp256k1_v0_9_2_gej_clear(rustsecp256k1_v0_9_2_gej *r); +/** Clear a rustsecp256k1_v0_10_0_gej to prevent leaking sensitive information. */ +static void rustsecp256k1_v0_10_0_gej_clear(rustsecp256k1_v0_10_0_gej *r); -/** Clear a rustsecp256k1_v0_9_2_ge to prevent leaking sensitive information. */ -static void rustsecp256k1_v0_9_2_ge_clear(rustsecp256k1_v0_9_2_ge *r); +/** Clear a rustsecp256k1_v0_10_0_ge to prevent leaking sensitive information. */ +static void rustsecp256k1_v0_10_0_ge_clear(rustsecp256k1_v0_10_0_ge *r); /** Convert a group element to the storage type. */ -static void rustsecp256k1_v0_9_2_ge_to_storage(rustsecp256k1_v0_9_2_ge_storage *r, const rustsecp256k1_v0_9_2_ge *a); +static void rustsecp256k1_v0_10_0_ge_to_storage(rustsecp256k1_v0_10_0_ge_storage *r, const rustsecp256k1_v0_10_0_ge *a); /** Convert a group element back from the storage type. */ -static void rustsecp256k1_v0_9_2_ge_from_storage(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_ge_storage *a); +static void rustsecp256k1_v0_10_0_ge_from_storage(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_ge_storage *a); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/ -static void rustsecp256k1_v0_9_2_gej_cmov(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a, int flag); +static void rustsecp256k1_v0_10_0_gej_cmov(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a, int flag); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/ -static void rustsecp256k1_v0_9_2_ge_storage_cmov(rustsecp256k1_v0_9_2_ge_storage *r, const rustsecp256k1_v0_9_2_ge_storage *a, int flag); +static void rustsecp256k1_v0_10_0_ge_storage_cmov(rustsecp256k1_v0_10_0_ge_storage *r, const rustsecp256k1_v0_10_0_ge_storage *a, int flag); /** Rescale a jacobian point by b which must be non-zero. Constant-time. */ -static void rustsecp256k1_v0_9_2_gej_rescale(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_fe *b); +static void rustsecp256k1_v0_10_0_gej_rescale(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_fe *b); /** Determine if a point (which is assumed to be on the curve) is in the correct (sub)group of the curve. 
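On the subgroup check whose doc comment begins here: the test amounts to multiplying by the (sub)group order and expecting the identity. Full elliptic-curve arithmetic would make a sketch long, so here is the same membership test transplanted into a multiplicative toy group, where the squares mod 23 form the subgroup of order 11 (in_subgroup and powmod are invented names, and this swaps the group, not the technique):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t powmod(uint64_t b, uint64_t e, uint64_t m) {
        unsigned __int128 acc = 1, base = b % m;
        while (e) {
            if (e & 1) acc = acc * base % m;
            base = base * base % m;
            e >>= 1;
        }
        return (uint64_t)acc;
    }

    /* An element lies in the subgroup of order q iff raising it to q gives
     * the identity, the multiplicative analogue of checking that
     * order * P is the point at infinity. */
    static int in_subgroup(uint64_t g, uint64_t q, uint64_t p) {
        return powmod(g, q, p) == 1;
    }

    int main(void) {
        assert(in_subgroup(2, 11, 23));   /* 2 == 5^2 is a square mod 23 */
        assert(!in_subgroup(5, 11, 23));  /* 5 is a non-square, order 22 */
        return 0;
    }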
* @@ -177,12 +183,14 @@ static void rustsecp256k1_v0_9_2_gej_rescale(rustsecp256k1_v0_9_2_gej *r, const * (very) small subgroup, and that subgroup is what is used for all cryptographic operations. In that mode, this * function checks whether a point that is on the curve is in fact also in that subgroup. */ -static int rustsecp256k1_v0_9_2_ge_is_in_correct_subgroup(const rustsecp256k1_v0_9_2_ge* ge); +static int rustsecp256k1_v0_10_0_ge_is_in_correct_subgroup(const rustsecp256k1_v0_10_0_ge* ge); /** Check invariants on an affine group element (no-op unless VERIFY is enabled). */ -static void rustsecp256k1_v0_9_2_ge_verify(const rustsecp256k1_v0_9_2_ge *a); +static void rustsecp256k1_v0_10_0_ge_verify(const rustsecp256k1_v0_10_0_ge *a); +#define SECP256K1_GE_VERIFY(a) rustsecp256k1_v0_10_0_ge_verify(a) /** Check invariants on a Jacobian group element (no-op unless VERIFY is enabled). */ -static void rustsecp256k1_v0_9_2_gej_verify(const rustsecp256k1_v0_9_2_gej *a); +static void rustsecp256k1_v0_10_0_gej_verify(const rustsecp256k1_v0_10_0_gej *a); +#define SECP256K1_GEJ_VERIFY(a) rustsecp256k1_v0_10_0_gej_verify(a) #endif /* SECP256K1_GROUP_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/group_impl.h b/secp256k1-sys/depend/secp256k1/src/group_impl.h index c970efdf8..8c40e77d1 100644 --- a/secp256k1-sys/depend/secp256k1/src/group_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/group_impl.h @@ -49,17 +49,17 @@ #if defined(EXHAUSTIVE_TEST_ORDER) # if EXHAUSTIVE_TEST_ORDER == 7 -static const rustsecp256k1_v0_9_2_ge rustsecp256k1_v0_9_2_ge_const_g = SECP256K1_G_ORDER_7; +static const rustsecp256k1_v0_10_0_ge rustsecp256k1_v0_10_0_ge_const_g = SECP256K1_G_ORDER_7; #define SECP256K1_B 6 # elif EXHAUSTIVE_TEST_ORDER == 13 -static const rustsecp256k1_v0_9_2_ge rustsecp256k1_v0_9_2_ge_const_g = SECP256K1_G_ORDER_13; +static const rustsecp256k1_v0_10_0_ge rustsecp256k1_v0_10_0_ge_const_g = SECP256K1_G_ORDER_13; #define SECP256K1_B 2 # elif EXHAUSTIVE_TEST_ORDER == 199 -static const rustsecp256k1_v0_9_2_ge rustsecp256k1_v0_9_2_ge_const_g = SECP256K1_G_ORDER_199; +static const rustsecp256k1_v0_10_0_ge rustsecp256k1_v0_10_0_ge_const_g = SECP256K1_G_ORDER_199; #define SECP256K1_B 4 # else @@ -67,155 +67,151 @@ static const rustsecp256k1_v0_9_2_ge rustsecp256k1_v0_9_2_ge_const_g = SECP256K1 # endif #else -static const rustsecp256k1_v0_9_2_ge rustsecp256k1_v0_9_2_ge_const_g = SECP256K1_G; +static const rustsecp256k1_v0_10_0_ge rustsecp256k1_v0_10_0_ge_const_g = SECP256K1_G; #define SECP256K1_B 7 #endif /* End of section generated by sage/gen_exhaustive_groups.sage. 
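(For the real curve SECP256K1_B is 7, i.e. y^2 = x^3 + 7; the EXHAUSTIVE_TEST_ORDER branches above swap in tiny groups of order 7, 13 or 199, with b = 6, 2 and 4 respectively, so the exhaustive tests can enumerate every group element.)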
*/ -static void rustsecp256k1_v0_9_2_ge_verify(const rustsecp256k1_v0_9_2_ge *a) { -#ifdef VERIFY - rustsecp256k1_v0_9_2_fe_verify(&a->x); - rustsecp256k1_v0_9_2_fe_verify(&a->y); - rustsecp256k1_v0_9_2_fe_verify_magnitude(&a->x, SECP256K1_GE_X_MAGNITUDE_MAX); - rustsecp256k1_v0_9_2_fe_verify_magnitude(&a->y, SECP256K1_GE_Y_MAGNITUDE_MAX); +static void rustsecp256k1_v0_10_0_ge_verify(const rustsecp256k1_v0_10_0_ge *a) { + SECP256K1_FE_VERIFY(&a->x); + SECP256K1_FE_VERIFY(&a->y); + SECP256K1_FE_VERIFY_MAGNITUDE(&a->x, SECP256K1_GE_X_MAGNITUDE_MAX); + SECP256K1_FE_VERIFY_MAGNITUDE(&a->y, SECP256K1_GE_Y_MAGNITUDE_MAX); VERIFY_CHECK(a->infinity == 0 || a->infinity == 1); -#endif (void)a; } -static void rustsecp256k1_v0_9_2_gej_verify(const rustsecp256k1_v0_9_2_gej *a) { -#ifdef VERIFY - rustsecp256k1_v0_9_2_fe_verify(&a->x); - rustsecp256k1_v0_9_2_fe_verify(&a->y); - rustsecp256k1_v0_9_2_fe_verify(&a->z); - rustsecp256k1_v0_9_2_fe_verify_magnitude(&a->x, SECP256K1_GEJ_X_MAGNITUDE_MAX); - rustsecp256k1_v0_9_2_fe_verify_magnitude(&a->y, SECP256K1_GEJ_Y_MAGNITUDE_MAX); - rustsecp256k1_v0_9_2_fe_verify_magnitude(&a->z, SECP256K1_GEJ_Z_MAGNITUDE_MAX); +static void rustsecp256k1_v0_10_0_gej_verify(const rustsecp256k1_v0_10_0_gej *a) { + SECP256K1_FE_VERIFY(&a->x); + SECP256K1_FE_VERIFY(&a->y); + SECP256K1_FE_VERIFY(&a->z); + SECP256K1_FE_VERIFY_MAGNITUDE(&a->x, SECP256K1_GEJ_X_MAGNITUDE_MAX); + SECP256K1_FE_VERIFY_MAGNITUDE(&a->y, SECP256K1_GEJ_Y_MAGNITUDE_MAX); + SECP256K1_FE_VERIFY_MAGNITUDE(&a->z, SECP256K1_GEJ_Z_MAGNITUDE_MAX); VERIFY_CHECK(a->infinity == 0 || a->infinity == 1); -#endif (void)a; } /* Set r to the affine coordinates of Jacobian point (a.x, a.y, 1/zi). */ -static void rustsecp256k1_v0_9_2_ge_set_gej_zinv(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_fe *zi) { - rustsecp256k1_v0_9_2_fe zi2; - rustsecp256k1_v0_9_2_fe zi3; - rustsecp256k1_v0_9_2_gej_verify(a); - rustsecp256k1_v0_9_2_fe_verify(zi); +static void rustsecp256k1_v0_10_0_ge_set_gej_zinv(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_fe *zi) { + rustsecp256k1_v0_10_0_fe zi2; + rustsecp256k1_v0_10_0_fe zi3; + SECP256K1_GEJ_VERIFY(a); + SECP256K1_FE_VERIFY(zi); VERIFY_CHECK(!a->infinity); - rustsecp256k1_v0_9_2_fe_sqr(&zi2, zi); - rustsecp256k1_v0_9_2_fe_mul(&zi3, &zi2, zi); - rustsecp256k1_v0_9_2_fe_mul(&r->x, &a->x, &zi2); - rustsecp256k1_v0_9_2_fe_mul(&r->y, &a->y, &zi3); + rustsecp256k1_v0_10_0_fe_sqr(&zi2, zi); + rustsecp256k1_v0_10_0_fe_mul(&zi3, &zi2, zi); + rustsecp256k1_v0_10_0_fe_mul(&r->x, &a->x, &zi2); + rustsecp256k1_v0_10_0_fe_mul(&r->y, &a->y, &zi3); r->infinity = a->infinity; - rustsecp256k1_v0_9_2_ge_verify(r); + SECP256K1_GE_VERIFY(r); } /* Set r to the affine coordinates of Jacobian point (a.x, a.y, 1/zi). 
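ge_set_gej_zinv above is the inversion-free half of Jacobian-to-affine conversion: given zi = 1/Z for a point (X, Y, Z), the affine coordinates are

    x = X * zi^2,   y = Y * zi^3,

matching the zi2/zi3 products computed in the body. The ge_set_gej variants that follow perform the same conversion but compute the inverse of Z themselves.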
*/ -static void rustsecp256k1_v0_9_2_ge_set_ge_zinv(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_ge *a, const rustsecp256k1_v0_9_2_fe *zi) { - rustsecp256k1_v0_9_2_fe zi2; - rustsecp256k1_v0_9_2_fe zi3; - rustsecp256k1_v0_9_2_ge_verify(a); - rustsecp256k1_v0_9_2_fe_verify(zi); +static void rustsecp256k1_v0_10_0_ge_set_ge_zinv(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_ge *a, const rustsecp256k1_v0_10_0_fe *zi) { + rustsecp256k1_v0_10_0_fe zi2; + rustsecp256k1_v0_10_0_fe zi3; + SECP256K1_GE_VERIFY(a); + SECP256K1_FE_VERIFY(zi); VERIFY_CHECK(!a->infinity); - rustsecp256k1_v0_9_2_fe_sqr(&zi2, zi); - rustsecp256k1_v0_9_2_fe_mul(&zi3, &zi2, zi); - rustsecp256k1_v0_9_2_fe_mul(&r->x, &a->x, &zi2); - rustsecp256k1_v0_9_2_fe_mul(&r->y, &a->y, &zi3); + rustsecp256k1_v0_10_0_fe_sqr(&zi2, zi); + rustsecp256k1_v0_10_0_fe_mul(&zi3, &zi2, zi); + rustsecp256k1_v0_10_0_fe_mul(&r->x, &a->x, &zi2); + rustsecp256k1_v0_10_0_fe_mul(&r->y, &a->y, &zi3); r->infinity = a->infinity; - rustsecp256k1_v0_9_2_ge_verify(r); + SECP256K1_GE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_ge_set_xy(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_fe *x, const rustsecp256k1_v0_9_2_fe *y) { - rustsecp256k1_v0_9_2_fe_verify(x); - rustsecp256k1_v0_9_2_fe_verify(y); +static void rustsecp256k1_v0_10_0_ge_set_xy(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_fe *x, const rustsecp256k1_v0_10_0_fe *y) { + SECP256K1_FE_VERIFY(x); + SECP256K1_FE_VERIFY(y); r->infinity = 0; r->x = *x; r->y = *y; - rustsecp256k1_v0_9_2_ge_verify(r); + SECP256K1_GE_VERIFY(r); } -static int rustsecp256k1_v0_9_2_ge_is_infinity(const rustsecp256k1_v0_9_2_ge *a) { - rustsecp256k1_v0_9_2_ge_verify(a); +static int rustsecp256k1_v0_10_0_ge_is_infinity(const rustsecp256k1_v0_10_0_ge *a) { + SECP256K1_GE_VERIFY(a); return a->infinity; } -static void rustsecp256k1_v0_9_2_ge_neg(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_ge *a) { - rustsecp256k1_v0_9_2_ge_verify(a); +static void rustsecp256k1_v0_10_0_ge_neg(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_ge *a) { + SECP256K1_GE_VERIFY(a); *r = *a; - rustsecp256k1_v0_9_2_fe_normalize_weak(&r->y); - rustsecp256k1_v0_9_2_fe_negate(&r->y, &r->y, 1); + rustsecp256k1_v0_10_0_fe_normalize_weak(&r->y); + rustsecp256k1_v0_10_0_fe_negate(&r->y, &r->y, 1); - rustsecp256k1_v0_9_2_ge_verify(r); + SECP256K1_GE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_ge_set_gej(rustsecp256k1_v0_9_2_ge *r, rustsecp256k1_v0_9_2_gej *a) { - rustsecp256k1_v0_9_2_fe z2, z3; - rustsecp256k1_v0_9_2_gej_verify(a); +static void rustsecp256k1_v0_10_0_ge_set_gej(rustsecp256k1_v0_10_0_ge *r, rustsecp256k1_v0_10_0_gej *a) { + rustsecp256k1_v0_10_0_fe z2, z3; + SECP256K1_GEJ_VERIFY(a); r->infinity = a->infinity; - rustsecp256k1_v0_9_2_fe_inv(&a->z, &a->z); - rustsecp256k1_v0_9_2_fe_sqr(&z2, &a->z); - rustsecp256k1_v0_9_2_fe_mul(&z3, &a->z, &z2); - rustsecp256k1_v0_9_2_fe_mul(&a->x, &a->x, &z2); - rustsecp256k1_v0_9_2_fe_mul(&a->y, &a->y, &z3); - rustsecp256k1_v0_9_2_fe_set_int(&a->z, 1); + rustsecp256k1_v0_10_0_fe_inv(&a->z, &a->z); + rustsecp256k1_v0_10_0_fe_sqr(&z2, &a->z); + rustsecp256k1_v0_10_0_fe_mul(&z3, &a->z, &z2); + rustsecp256k1_v0_10_0_fe_mul(&a->x, &a->x, &z2); + rustsecp256k1_v0_10_0_fe_mul(&a->y, &a->y, &z3); + rustsecp256k1_v0_10_0_fe_set_int(&a->z, 1); r->x = a->x; r->y = a->y; - rustsecp256k1_v0_9_2_gej_verify(a); - rustsecp256k1_v0_9_2_ge_verify(r); + SECP256K1_GEJ_VERIFY(a); + SECP256K1_GE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_ge_set_gej_var(rustsecp256k1_v0_9_2_ge 
*r, rustsecp256k1_v0_9_2_gej *a) { - rustsecp256k1_v0_9_2_fe z2, z3; - rustsecp256k1_v0_9_2_gej_verify(a); +static void rustsecp256k1_v0_10_0_ge_set_gej_var(rustsecp256k1_v0_10_0_ge *r, rustsecp256k1_v0_10_0_gej *a) { + rustsecp256k1_v0_10_0_fe z2, z3; + SECP256K1_GEJ_VERIFY(a); - if (rustsecp256k1_v0_9_2_gej_is_infinity(a)) { - rustsecp256k1_v0_9_2_ge_set_infinity(r); + if (rustsecp256k1_v0_10_0_gej_is_infinity(a)) { + rustsecp256k1_v0_10_0_ge_set_infinity(r); return; } r->infinity = 0; - rustsecp256k1_v0_9_2_fe_inv_var(&a->z, &a->z); - rustsecp256k1_v0_9_2_fe_sqr(&z2, &a->z); - rustsecp256k1_v0_9_2_fe_mul(&z3, &a->z, &z2); - rustsecp256k1_v0_9_2_fe_mul(&a->x, &a->x, &z2); - rustsecp256k1_v0_9_2_fe_mul(&a->y, &a->y, &z3); - rustsecp256k1_v0_9_2_fe_set_int(&a->z, 1); - rustsecp256k1_v0_9_2_ge_set_xy(r, &a->x, &a->y); + rustsecp256k1_v0_10_0_fe_inv_var(&a->z, &a->z); + rustsecp256k1_v0_10_0_fe_sqr(&z2, &a->z); + rustsecp256k1_v0_10_0_fe_mul(&z3, &a->z, &z2); + rustsecp256k1_v0_10_0_fe_mul(&a->x, &a->x, &z2); + rustsecp256k1_v0_10_0_fe_mul(&a->y, &a->y, &z3); + rustsecp256k1_v0_10_0_fe_set_int(&a->z, 1); + rustsecp256k1_v0_10_0_ge_set_xy(r, &a->x, &a->y); - rustsecp256k1_v0_9_2_gej_verify(a); - rustsecp256k1_v0_9_2_ge_verify(r); + SECP256K1_GEJ_VERIFY(a); + SECP256K1_GE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_ge_set_all_gej_var(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_gej *a, size_t len) { - rustsecp256k1_v0_9_2_fe u; +static void rustsecp256k1_v0_10_0_ge_set_all_gej_var(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_gej *a, size_t len) { + rustsecp256k1_v0_10_0_fe u; size_t i; size_t last_i = SIZE_MAX; #ifdef VERIFY for (i = 0; i < len; i++) { - rustsecp256k1_v0_9_2_gej_verify(&a[i]); + SECP256K1_GEJ_VERIFY(&a[i]); } #endif for (i = 0; i < len; i++) { if (a[i].infinity) { - rustsecp256k1_v0_9_2_ge_set_infinity(&r[i]); + rustsecp256k1_v0_10_0_ge_set_infinity(&r[i]); } else { /* Use destination's x coordinates as scratch space */ if (last_i == SIZE_MAX) { r[i].x = a[i].z; } else { - rustsecp256k1_v0_9_2_fe_mul(&r[i].x, &r[last_i].x, &a[i].z); + rustsecp256k1_v0_10_0_fe_mul(&r[i].x, &r[last_i].x, &a[i].z); } last_i = i; } @@ -223,14 +219,14 @@ static void rustsecp256k1_v0_9_2_ge_set_all_gej_var(rustsecp256k1_v0_9_2_ge *r, if (last_i == SIZE_MAX) { return; } - rustsecp256k1_v0_9_2_fe_inv_var(&u, &r[last_i].x); + rustsecp256k1_v0_10_0_fe_inv_var(&u, &r[last_i].x); i = last_i; while (i > 0) { i--; if (!a[i].infinity) { - rustsecp256k1_v0_9_2_fe_mul(&r[last_i].x, &r[i].x, &u); - rustsecp256k1_v0_9_2_fe_mul(&u, &u, &a[last_i].z); + rustsecp256k1_v0_10_0_fe_mul(&r[last_i].x, &r[i].x, &u); + rustsecp256k1_v0_10_0_fe_mul(&u, &u, &a[last_i].z); last_i = i; } } @@ -239,174 +235,201 @@ static void rustsecp256k1_v0_9_2_ge_set_all_gej_var(rustsecp256k1_v0_9_2_ge *r, for (i = 0; i < len; i++) { if (!a[i].infinity) { - rustsecp256k1_v0_9_2_ge_set_gej_zinv(&r[i], &a[i], &r[i].x); + rustsecp256k1_v0_10_0_ge_set_gej_zinv(&r[i], &a[i], &r[i].x); } } #ifdef VERIFY for (i = 0; i < len; i++) { - rustsecp256k1_v0_9_2_ge_verify(&r[i]); + SECP256K1_GE_VERIFY(&r[i]); } #endif } -static void rustsecp256k1_v0_9_2_ge_table_set_globalz(size_t len, rustsecp256k1_v0_9_2_ge *a, const rustsecp256k1_v0_9_2_fe *zr) { +static void rustsecp256k1_v0_10_0_ge_table_set_globalz(size_t len, rustsecp256k1_v0_10_0_ge *a, const rustsecp256k1_v0_10_0_fe *zr) { size_t i; - rustsecp256k1_v0_9_2_fe zs; + rustsecp256k1_v0_10_0_fe zs; #ifdef VERIFY for (i = 0; i < len; i++) { - 
rustsecp256k1_v0_9_2_ge_verify(&a[i]); - rustsecp256k1_v0_9_2_fe_verify(&zr[i]); + SECP256K1_GE_VERIFY(&a[i]); + SECP256K1_FE_VERIFY(&zr[i]); } #endif if (len > 0) { i = len - 1; /* Ensure all y values are in weak normal form for fast negation of points */ - rustsecp256k1_v0_9_2_fe_normalize_weak(&a[i].y); + rustsecp256k1_v0_10_0_fe_normalize_weak(&a[i].y); zs = zr[i]; /* Work our way backwards, using the z-ratios to scale the x/y values. */ while (i > 0) { if (i != len - 1) { - rustsecp256k1_v0_9_2_fe_mul(&zs, &zs, &zr[i]); + rustsecp256k1_v0_10_0_fe_mul(&zs, &zs, &zr[i]); } i--; - rustsecp256k1_v0_9_2_ge_set_ge_zinv(&a[i], &a[i], &zs); + rustsecp256k1_v0_10_0_ge_set_ge_zinv(&a[i], &a[i], &zs); } } #ifdef VERIFY for (i = 0; i < len; i++) { - rustsecp256k1_v0_9_2_ge_verify(&a[i]); + SECP256K1_GE_VERIFY(&a[i]); } #endif } -static void rustsecp256k1_v0_9_2_gej_set_infinity(rustsecp256k1_v0_9_2_gej *r) { +static void rustsecp256k1_v0_10_0_gej_set_infinity(rustsecp256k1_v0_10_0_gej *r) { r->infinity = 1; - rustsecp256k1_v0_9_2_fe_clear(&r->x); - rustsecp256k1_v0_9_2_fe_clear(&r->y); - rustsecp256k1_v0_9_2_fe_clear(&r->z); + rustsecp256k1_v0_10_0_fe_clear(&r->x); + rustsecp256k1_v0_10_0_fe_clear(&r->y); + rustsecp256k1_v0_10_0_fe_clear(&r->z); - rustsecp256k1_v0_9_2_gej_verify(r); + SECP256K1_GEJ_VERIFY(r); } -static void rustsecp256k1_v0_9_2_ge_set_infinity(rustsecp256k1_v0_9_2_ge *r) { +static void rustsecp256k1_v0_10_0_ge_set_infinity(rustsecp256k1_v0_10_0_ge *r) { r->infinity = 1; - rustsecp256k1_v0_9_2_fe_clear(&r->x); - rustsecp256k1_v0_9_2_fe_clear(&r->y); + rustsecp256k1_v0_10_0_fe_clear(&r->x); + rustsecp256k1_v0_10_0_fe_clear(&r->y); - rustsecp256k1_v0_9_2_ge_verify(r); + SECP256K1_GE_VERIFY(r); } -static void rustsecp256k1_v0_9_2_gej_clear(rustsecp256k1_v0_9_2_gej *r) { +static void rustsecp256k1_v0_10_0_gej_clear(rustsecp256k1_v0_10_0_gej *r) { r->infinity = 0; - rustsecp256k1_v0_9_2_fe_clear(&r->x); - rustsecp256k1_v0_9_2_fe_clear(&r->y); - rustsecp256k1_v0_9_2_fe_clear(&r->z); + rustsecp256k1_v0_10_0_fe_clear(&r->x); + rustsecp256k1_v0_10_0_fe_clear(&r->y); + rustsecp256k1_v0_10_0_fe_clear(&r->z); - rustsecp256k1_v0_9_2_gej_verify(r); + SECP256K1_GEJ_VERIFY(r); } -static void rustsecp256k1_v0_9_2_ge_clear(rustsecp256k1_v0_9_2_ge *r) { +static void rustsecp256k1_v0_10_0_ge_clear(rustsecp256k1_v0_10_0_ge *r) { r->infinity = 0; - rustsecp256k1_v0_9_2_fe_clear(&r->x); - rustsecp256k1_v0_9_2_fe_clear(&r->y); + rustsecp256k1_v0_10_0_fe_clear(&r->x); + rustsecp256k1_v0_10_0_fe_clear(&r->y); - rustsecp256k1_v0_9_2_ge_verify(r); + SECP256K1_GE_VERIFY(r); } -static int rustsecp256k1_v0_9_2_ge_set_xo_var(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_fe *x, int odd) { - rustsecp256k1_v0_9_2_fe x2, x3; +static int rustsecp256k1_v0_10_0_ge_set_xo_var(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_fe *x, int odd) { + rustsecp256k1_v0_10_0_fe x2, x3; int ret; - rustsecp256k1_v0_9_2_fe_verify(x); + SECP256K1_FE_VERIFY(x); r->x = *x; - rustsecp256k1_v0_9_2_fe_sqr(&x2, x); - rustsecp256k1_v0_9_2_fe_mul(&x3, x, &x2); + rustsecp256k1_v0_10_0_fe_sqr(&x2, x); + rustsecp256k1_v0_10_0_fe_mul(&x3, x, &x2); r->infinity = 0; - rustsecp256k1_v0_9_2_fe_add_int(&x3, SECP256K1_B); - ret = rustsecp256k1_v0_9_2_fe_sqrt(&r->y, &x3); - rustsecp256k1_v0_9_2_fe_normalize_var(&r->y); - if (rustsecp256k1_v0_9_2_fe_is_odd(&r->y) != odd) { - rustsecp256k1_v0_9_2_fe_negate(&r->y, &r->y, 1); + rustsecp256k1_v0_10_0_fe_add_int(&x3, SECP256K1_B); + ret = rustsecp256k1_v0_10_0_fe_sqrt(&r->y, &x3); + 
rustsecp256k1_v0_10_0_fe_normalize_var(&r->y); + if (rustsecp256k1_v0_10_0_fe_is_odd(&r->y) != odd) { + rustsecp256k1_v0_10_0_fe_negate(&r->y, &r->y, 1); } - rustsecp256k1_v0_9_2_ge_verify(r); + SECP256K1_GE_VERIFY(r); return ret; } -static void rustsecp256k1_v0_9_2_gej_set_ge(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_ge *a) { - rustsecp256k1_v0_9_2_ge_verify(a); +static void rustsecp256k1_v0_10_0_gej_set_ge(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_ge *a) { + SECP256K1_GE_VERIFY(a); r->infinity = a->infinity; r->x = a->x; r->y = a->y; - rustsecp256k1_v0_9_2_fe_set_int(&r->z, 1); + rustsecp256k1_v0_10_0_fe_set_int(&r->z, 1); - rustsecp256k1_v0_9_2_gej_verify(r); + SECP256K1_GEJ_VERIFY(r); } -static int rustsecp256k1_v0_9_2_gej_eq_var(const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_gej *b) { - rustsecp256k1_v0_9_2_gej tmp; - rustsecp256k1_v0_9_2_gej_verify(b); - rustsecp256k1_v0_9_2_gej_verify(a); +static int rustsecp256k1_v0_10_0_gej_eq_var(const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_gej *b) { + rustsecp256k1_v0_10_0_gej tmp; + SECP256K1_GEJ_VERIFY(b); + SECP256K1_GEJ_VERIFY(a); - rustsecp256k1_v0_9_2_gej_neg(&tmp, a); - rustsecp256k1_v0_9_2_gej_add_var(&tmp, &tmp, b, NULL); - return rustsecp256k1_v0_9_2_gej_is_infinity(&tmp); + rustsecp256k1_v0_10_0_gej_neg(&tmp, a); + rustsecp256k1_v0_10_0_gej_add_var(&tmp, &tmp, b, NULL); + return rustsecp256k1_v0_10_0_gej_is_infinity(&tmp); } -static int rustsecp256k1_v0_9_2_gej_eq_x_var(const rustsecp256k1_v0_9_2_fe *x, const rustsecp256k1_v0_9_2_gej *a) { - rustsecp256k1_v0_9_2_fe r; - rustsecp256k1_v0_9_2_fe_verify(x); - rustsecp256k1_v0_9_2_gej_verify(a); -#ifdef VERIFY +static int rustsecp256k1_v0_10_0_gej_eq_ge_var(const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_ge *b) { + rustsecp256k1_v0_10_0_gej tmp; + SECP256K1_GEJ_VERIFY(a); + SECP256K1_GE_VERIFY(b); + + rustsecp256k1_v0_10_0_gej_neg(&tmp, a); + rustsecp256k1_v0_10_0_gej_add_ge_var(&tmp, &tmp, b, NULL); + return rustsecp256k1_v0_10_0_gej_is_infinity(&tmp); +} + +static int rustsecp256k1_v0_10_0_ge_eq_var(const rustsecp256k1_v0_10_0_ge *a, const rustsecp256k1_v0_10_0_ge *b) { + rustsecp256k1_v0_10_0_fe tmp; + SECP256K1_GE_VERIFY(a); + SECP256K1_GE_VERIFY(b); + + if (a->infinity != b->infinity) return 0; + if (a->infinity) return 1; + + tmp = a->x; + rustsecp256k1_v0_10_0_fe_normalize_weak(&tmp); + if (!rustsecp256k1_v0_10_0_fe_equal(&tmp, &b->x)) return 0; + + tmp = a->y; + rustsecp256k1_v0_10_0_fe_normalize_weak(&tmp); + if (!rustsecp256k1_v0_10_0_fe_equal(&tmp, &b->y)) return 0; + + return 1; +} + +static int rustsecp256k1_v0_10_0_gej_eq_x_var(const rustsecp256k1_v0_10_0_fe *x, const rustsecp256k1_v0_10_0_gej *a) { + rustsecp256k1_v0_10_0_fe r; + SECP256K1_FE_VERIFY(x); + SECP256K1_GEJ_VERIFY(a); VERIFY_CHECK(!a->infinity); -#endif - rustsecp256k1_v0_9_2_fe_sqr(&r, &a->z); rustsecp256k1_v0_9_2_fe_mul(&r, &r, x); - return rustsecp256k1_v0_9_2_fe_equal(&r, &a->x); + rustsecp256k1_v0_10_0_fe_sqr(&r, &a->z); rustsecp256k1_v0_10_0_fe_mul(&r, &r, x); + return rustsecp256k1_v0_10_0_fe_equal(&r, &a->x); } -static void rustsecp256k1_v0_9_2_gej_neg(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a) { - rustsecp256k1_v0_9_2_gej_verify(a); +static void rustsecp256k1_v0_10_0_gej_neg(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a) { + SECP256K1_GEJ_VERIFY(a); r->infinity = a->infinity; r->x = a->x; r->y = a->y; r->z = a->z; - rustsecp256k1_v0_9_2_fe_normalize_weak(&r->y); - 
rustsecp256k1_v0_9_2_fe_negate(&r->y, &r->y, 1); + rustsecp256k1_v0_10_0_fe_normalize_weak(&r->y); + rustsecp256k1_v0_10_0_fe_negate(&r->y, &r->y, 1); - rustsecp256k1_v0_9_2_gej_verify(r); + SECP256K1_GEJ_VERIFY(r); } -static int rustsecp256k1_v0_9_2_gej_is_infinity(const rustsecp256k1_v0_9_2_gej *a) { - rustsecp256k1_v0_9_2_gej_verify(a); +static int rustsecp256k1_v0_10_0_gej_is_infinity(const rustsecp256k1_v0_10_0_gej *a) { + SECP256K1_GEJ_VERIFY(a); return a->infinity; } -static int rustsecp256k1_v0_9_2_ge_is_valid_var(const rustsecp256k1_v0_9_2_ge *a) { - rustsecp256k1_v0_9_2_fe y2, x3; - rustsecp256k1_v0_9_2_ge_verify(a); +static int rustsecp256k1_v0_10_0_ge_is_valid_var(const rustsecp256k1_v0_10_0_ge *a) { + rustsecp256k1_v0_10_0_fe y2, x3; + SECP256K1_GE_VERIFY(a); if (a->infinity) { return 0; } /* y^2 = x^3 + 7 */ - rustsecp256k1_v0_9_2_fe_sqr(&y2, &a->y); - rustsecp256k1_v0_9_2_fe_sqr(&x3, &a->x); rustsecp256k1_v0_9_2_fe_mul(&x3, &x3, &a->x); - rustsecp256k1_v0_9_2_fe_add_int(&x3, SECP256K1_B); - return rustsecp256k1_v0_9_2_fe_equal(&y2, &x3); + rustsecp256k1_v0_10_0_fe_sqr(&y2, &a->y); + rustsecp256k1_v0_10_0_fe_sqr(&x3, &a->x); rustsecp256k1_v0_10_0_fe_mul(&x3, &x3, &a->x); + rustsecp256k1_v0_10_0_fe_add_int(&x3, SECP256K1_B); + return rustsecp256k1_v0_10_0_fe_equal(&y2, &x3); } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_gej_double(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_gej_double(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a) { /* Operations: 3 mul, 4 sqr, 8 add/half/mul_int/negate */ - rustsecp256k1_v0_9_2_fe l, s, t; - rustsecp256k1_v0_9_2_gej_verify(a); + rustsecp256k1_v0_10_0_fe l, s, t; + SECP256K1_GEJ_VERIFY(a); r->infinity = a->infinity; @@ -419,27 +442,27 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_gej_double(rustsecp256k1_v0_9_ * Z3 = Y1*Z1 */ - rustsecp256k1_v0_9_2_fe_mul(&r->z, &a->z, &a->y); /* Z3 = Y1*Z1 (1) */ - rustsecp256k1_v0_9_2_fe_sqr(&s, &a->y); /* S = Y1^2 (1) */ - rustsecp256k1_v0_9_2_fe_sqr(&l, &a->x); /* L = X1^2 (1) */ - rustsecp256k1_v0_9_2_fe_mul_int(&l, 3); /* L = 3*X1^2 (3) */ - rustsecp256k1_v0_9_2_fe_half(&l); /* L = 3/2*X1^2 (2) */ - rustsecp256k1_v0_9_2_fe_negate(&t, &s, 1); /* T = -S (2) */ - rustsecp256k1_v0_9_2_fe_mul(&t, &t, &a->x); /* T = -X1*S (1) */ - rustsecp256k1_v0_9_2_fe_sqr(&r->x, &l); /* X3 = L^2 (1) */ - rustsecp256k1_v0_9_2_fe_add(&r->x, &t); /* X3 = L^2 + T (2) */ - rustsecp256k1_v0_9_2_fe_add(&r->x, &t); /* X3 = L^2 + 2*T (3) */ - rustsecp256k1_v0_9_2_fe_sqr(&s, &s); /* S' = S^2 (1) */ - rustsecp256k1_v0_9_2_fe_add(&t, &r->x); /* T' = X3 + T (4) */ - rustsecp256k1_v0_9_2_fe_mul(&r->y, &t, &l); /* Y3 = L*(X3 + T) (1) */ - rustsecp256k1_v0_9_2_fe_add(&r->y, &s); /* Y3 = L*(X3 + T) + S^2 (2) */ - rustsecp256k1_v0_9_2_fe_negate(&r->y, &r->y, 2); /* Y3 = -(L*(X3 + T) + S^2) (3) */ + rustsecp256k1_v0_10_0_fe_mul(&r->z, &a->z, &a->y); /* Z3 = Y1*Z1 (1) */ + rustsecp256k1_v0_10_0_fe_sqr(&s, &a->y); /* S = Y1^2 (1) */ + rustsecp256k1_v0_10_0_fe_sqr(&l, &a->x); /* L = X1^2 (1) */ + rustsecp256k1_v0_10_0_fe_mul_int(&l, 3); /* L = 3*X1^2 (3) */ + rustsecp256k1_v0_10_0_fe_half(&l); /* L = 3/2*X1^2 (2) */ + rustsecp256k1_v0_10_0_fe_negate(&t, &s, 1); /* T = -S (2) */ + rustsecp256k1_v0_10_0_fe_mul(&t, &t, &a->x); /* T = -X1*S (1) */ + rustsecp256k1_v0_10_0_fe_sqr(&r->x, &l); /* X3 = L^2 (1) */ + rustsecp256k1_v0_10_0_fe_add(&r->x, &t); /* X3 = L^2 + T (2) */ + rustsecp256k1_v0_10_0_fe_add(&r->x, &t); /* X3 = L^2 + 2*T (3) */ + 
rustsecp256k1_v0_10_0_fe_sqr(&s, &s); /* S' = S^2 (1) */ + rustsecp256k1_v0_10_0_fe_add(&t, &r->x); /* T' = X3 + T (4) */ + rustsecp256k1_v0_10_0_fe_mul(&r->y, &t, &l); /* Y3 = L*(X3 + T) (1) */ + rustsecp256k1_v0_10_0_fe_add(&r->y, &s); /* Y3 = L*(X3 + T) + S^2 (2) */ + rustsecp256k1_v0_10_0_fe_negate(&r->y, &r->y, 2); /* Y3 = -(L*(X3 + T) + S^2) (3) */ - rustsecp256k1_v0_9_2_gej_verify(r); + SECP256K1_GEJ_VERIFY(r); } -static void rustsecp256k1_v0_9_2_gej_double_var(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a, rustsecp256k1_v0_9_2_fe *rzr) { - rustsecp256k1_v0_9_2_gej_verify(a); +static void rustsecp256k1_v0_10_0_gej_double_var(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a, rustsecp256k1_v0_10_0_fe *rzr) { + SECP256K1_GEJ_VERIFY(a); /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity, * Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have @@ -452,28 +475,28 @@ static void rustsecp256k1_v0_9_2_gej_double_var(rustsecp256k1_v0_9_2_gej *r, con * point will be gibberish (z = 0 but infinity = 0). */ if (a->infinity) { - rustsecp256k1_v0_9_2_gej_set_infinity(r); + rustsecp256k1_v0_10_0_gej_set_infinity(r); if (rzr != NULL) { - rustsecp256k1_v0_9_2_fe_set_int(rzr, 1); + rustsecp256k1_v0_10_0_fe_set_int(rzr, 1); } return; } if (rzr != NULL) { *rzr = a->y; - rustsecp256k1_v0_9_2_fe_normalize_weak(rzr); + rustsecp256k1_v0_10_0_fe_normalize_weak(rzr); } - rustsecp256k1_v0_9_2_gej_double(r, a); + rustsecp256k1_v0_10_0_gej_double(r, a); - rustsecp256k1_v0_9_2_gej_verify(r); + SECP256K1_GEJ_VERIFY(r); } -static void rustsecp256k1_v0_9_2_gej_add_var(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_gej *b, rustsecp256k1_v0_9_2_fe *rzr) { +static void rustsecp256k1_v0_10_0_gej_add_var(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_gej *b, rustsecp256k1_v0_10_0_fe *rzr) { /* 12 mul, 4 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */ - rustsecp256k1_v0_9_2_fe z22, z12, u1, u2, s1, s2, h, i, h2, h3, t; - rustsecp256k1_v0_9_2_gej_verify(a); - rustsecp256k1_v0_9_2_gej_verify(b); + rustsecp256k1_v0_10_0_fe z22, z12, u1, u2, s1, s2, h, i, h2, h3, t; + SECP256K1_GEJ_VERIFY(a); + SECP256K1_GEJ_VERIFY(b); if (a->infinity) { VERIFY_CHECK(rzr == NULL); @@ -482,91 +505,91 @@ static void rustsecp256k1_v0_9_2_gej_add_var(rustsecp256k1_v0_9_2_gej *r, const } if (b->infinity) { if (rzr != NULL) { - rustsecp256k1_v0_9_2_fe_set_int(rzr, 1); + rustsecp256k1_v0_10_0_fe_set_int(rzr, 1); } *r = *a; return; } - rustsecp256k1_v0_9_2_fe_sqr(&z22, &b->z); - rustsecp256k1_v0_9_2_fe_sqr(&z12, &a->z); - rustsecp256k1_v0_9_2_fe_mul(&u1, &a->x, &z22); - rustsecp256k1_v0_9_2_fe_mul(&u2, &b->x, &z12); - rustsecp256k1_v0_9_2_fe_mul(&s1, &a->y, &z22); rustsecp256k1_v0_9_2_fe_mul(&s1, &s1, &b->z); - rustsecp256k1_v0_9_2_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_9_2_fe_mul(&s2, &s2, &a->z); - rustsecp256k1_v0_9_2_fe_negate(&h, &u1, 1); rustsecp256k1_v0_9_2_fe_add(&h, &u2); - rustsecp256k1_v0_9_2_fe_negate(&i, &s2, 1); rustsecp256k1_v0_9_2_fe_add(&i, &s1); - if (rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&h)) { - if (rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&i)) { - rustsecp256k1_v0_9_2_gej_double_var(r, a, rzr); + rustsecp256k1_v0_10_0_fe_sqr(&z22, &b->z); + rustsecp256k1_v0_10_0_fe_sqr(&z12, &a->z); + rustsecp256k1_v0_10_0_fe_mul(&u1, &a->x, &z22); + rustsecp256k1_v0_10_0_fe_mul(&u2, &b->x, &z12); + 
rustsecp256k1_v0_10_0_fe_mul(&s1, &a->y, &z22); rustsecp256k1_v0_10_0_fe_mul(&s1, &s1, &b->z); + rustsecp256k1_v0_10_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_10_0_fe_mul(&s2, &s2, &a->z); + rustsecp256k1_v0_10_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_10_0_fe_add(&h, &u2); + rustsecp256k1_v0_10_0_fe_negate(&i, &s2, 1); rustsecp256k1_v0_10_0_fe_add(&i, &s1); + if (rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&h)) { + if (rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&i)) { + rustsecp256k1_v0_10_0_gej_double_var(r, a, rzr); } else { if (rzr != NULL) { - rustsecp256k1_v0_9_2_fe_set_int(rzr, 0); + rustsecp256k1_v0_10_0_fe_set_int(rzr, 0); } - rustsecp256k1_v0_9_2_gej_set_infinity(r); + rustsecp256k1_v0_10_0_gej_set_infinity(r); } return; } r->infinity = 0; - rustsecp256k1_v0_9_2_fe_mul(&t, &h, &b->z); + rustsecp256k1_v0_10_0_fe_mul(&t, &h, &b->z); if (rzr != NULL) { *rzr = t; } - rustsecp256k1_v0_9_2_fe_mul(&r->z, &a->z, &t); + rustsecp256k1_v0_10_0_fe_mul(&r->z, &a->z, &t); - rustsecp256k1_v0_9_2_fe_sqr(&h2, &h); - rustsecp256k1_v0_9_2_fe_negate(&h2, &h2, 1); - rustsecp256k1_v0_9_2_fe_mul(&h3, &h2, &h); - rustsecp256k1_v0_9_2_fe_mul(&t, &u1, &h2); + rustsecp256k1_v0_10_0_fe_sqr(&h2, &h); + rustsecp256k1_v0_10_0_fe_negate(&h2, &h2, 1); + rustsecp256k1_v0_10_0_fe_mul(&h3, &h2, &h); + rustsecp256k1_v0_10_0_fe_mul(&t, &u1, &h2); - rustsecp256k1_v0_9_2_fe_sqr(&r->x, &i); - rustsecp256k1_v0_9_2_fe_add(&r->x, &h3); - rustsecp256k1_v0_9_2_fe_add(&r->x, &t); - rustsecp256k1_v0_9_2_fe_add(&r->x, &t); + rustsecp256k1_v0_10_0_fe_sqr(&r->x, &i); + rustsecp256k1_v0_10_0_fe_add(&r->x, &h3); + rustsecp256k1_v0_10_0_fe_add(&r->x, &t); + rustsecp256k1_v0_10_0_fe_add(&r->x, &t); - rustsecp256k1_v0_9_2_fe_add(&t, &r->x); - rustsecp256k1_v0_9_2_fe_mul(&r->y, &t, &i); - rustsecp256k1_v0_9_2_fe_mul(&h3, &h3, &s1); - rustsecp256k1_v0_9_2_fe_add(&r->y, &h3); + rustsecp256k1_v0_10_0_fe_add(&t, &r->x); + rustsecp256k1_v0_10_0_fe_mul(&r->y, &t, &i); + rustsecp256k1_v0_10_0_fe_mul(&h3, &h3, &s1); + rustsecp256k1_v0_10_0_fe_add(&r->y, &h3); - rustsecp256k1_v0_9_2_gej_verify(r); + SECP256K1_GEJ_VERIFY(r); } -static void rustsecp256k1_v0_9_2_gej_add_ge_var(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_ge *b, rustsecp256k1_v0_9_2_fe *rzr) { +static void rustsecp256k1_v0_10_0_gej_add_ge_var(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_ge *b, rustsecp256k1_v0_10_0_fe *rzr) { /* Operations: 8 mul, 3 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */ - rustsecp256k1_v0_9_2_fe z12, u1, u2, s1, s2, h, i, h2, h3, t; - rustsecp256k1_v0_9_2_gej_verify(a); - rustsecp256k1_v0_9_2_ge_verify(b); + rustsecp256k1_v0_10_0_fe z12, u1, u2, s1, s2, h, i, h2, h3, t; + SECP256K1_GEJ_VERIFY(a); + SECP256K1_GE_VERIFY(b); if (a->infinity) { VERIFY_CHECK(rzr == NULL); - rustsecp256k1_v0_9_2_gej_set_ge(r, b); + rustsecp256k1_v0_10_0_gej_set_ge(r, b); return; } if (b->infinity) { if (rzr != NULL) { - rustsecp256k1_v0_9_2_fe_set_int(rzr, 1); + rustsecp256k1_v0_10_0_fe_set_int(rzr, 1); } *r = *a; return; } - rustsecp256k1_v0_9_2_fe_sqr(&z12, &a->z); + rustsecp256k1_v0_10_0_fe_sqr(&z12, &a->z); u1 = a->x; - rustsecp256k1_v0_9_2_fe_mul(&u2, &b->x, &z12); + rustsecp256k1_v0_10_0_fe_mul(&u2, &b->x, &z12); s1 = a->y; - rustsecp256k1_v0_9_2_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_9_2_fe_mul(&s2, &s2, &a->z); - rustsecp256k1_v0_9_2_fe_negate(&h, &u1, SECP256K1_GEJ_X_MAGNITUDE_MAX); rustsecp256k1_v0_9_2_fe_add(&h, &u2); - 
rustsecp256k1_v0_9_2_fe_negate(&i, &s2, 1); rustsecp256k1_v0_9_2_fe_add(&i, &s1); - if (rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&h)) { - if (rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&i)) { - rustsecp256k1_v0_9_2_gej_double_var(r, a, rzr); + rustsecp256k1_v0_10_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_10_0_fe_mul(&s2, &s2, &a->z); + rustsecp256k1_v0_10_0_fe_negate(&h, &u1, SECP256K1_GEJ_X_MAGNITUDE_MAX); rustsecp256k1_v0_10_0_fe_add(&h, &u2); + rustsecp256k1_v0_10_0_fe_negate(&i, &s2, 1); rustsecp256k1_v0_10_0_fe_add(&i, &s1); + if (rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&h)) { + if (rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&i)) { + rustsecp256k1_v0_10_0_gej_double_var(r, a, rzr); } else { if (rzr != NULL) { - rustsecp256k1_v0_9_2_fe_set_int(rzr, 0); + rustsecp256k1_v0_10_0_fe_set_int(rzr, 0); } - rustsecp256k1_v0_9_2_gej_set_infinity(r); + rustsecp256k1_v0_10_0_gej_set_infinity(r); } return; } @@ -575,43 +598,43 @@ static void rustsecp256k1_v0_9_2_gej_add_ge_var(rustsecp256k1_v0_9_2_gej *r, con if (rzr != NULL) { *rzr = h; } - rustsecp256k1_v0_9_2_fe_mul(&r->z, &a->z, &h); + rustsecp256k1_v0_10_0_fe_mul(&r->z, &a->z, &h); - rustsecp256k1_v0_9_2_fe_sqr(&h2, &h); - rustsecp256k1_v0_9_2_fe_negate(&h2, &h2, 1); - rustsecp256k1_v0_9_2_fe_mul(&h3, &h2, &h); - rustsecp256k1_v0_9_2_fe_mul(&t, &u1, &h2); + rustsecp256k1_v0_10_0_fe_sqr(&h2, &h); + rustsecp256k1_v0_10_0_fe_negate(&h2, &h2, 1); + rustsecp256k1_v0_10_0_fe_mul(&h3, &h2, &h); + rustsecp256k1_v0_10_0_fe_mul(&t, &u1, &h2); - rustsecp256k1_v0_9_2_fe_sqr(&r->x, &i); - rustsecp256k1_v0_9_2_fe_add(&r->x, &h3); - rustsecp256k1_v0_9_2_fe_add(&r->x, &t); - rustsecp256k1_v0_9_2_fe_add(&r->x, &t); + rustsecp256k1_v0_10_0_fe_sqr(&r->x, &i); + rustsecp256k1_v0_10_0_fe_add(&r->x, &h3); + rustsecp256k1_v0_10_0_fe_add(&r->x, &t); + rustsecp256k1_v0_10_0_fe_add(&r->x, &t); - rustsecp256k1_v0_9_2_fe_add(&t, &r->x); - rustsecp256k1_v0_9_2_fe_mul(&r->y, &t, &i); - rustsecp256k1_v0_9_2_fe_mul(&h3, &h3, &s1); - rustsecp256k1_v0_9_2_fe_add(&r->y, &h3); + rustsecp256k1_v0_10_0_fe_add(&t, &r->x); + rustsecp256k1_v0_10_0_fe_mul(&r->y, &t, &i); + rustsecp256k1_v0_10_0_fe_mul(&h3, &h3, &s1); + rustsecp256k1_v0_10_0_fe_add(&r->y, &h3); - rustsecp256k1_v0_9_2_gej_verify(r); - if (rzr != NULL) rustsecp256k1_v0_9_2_fe_verify(rzr); + SECP256K1_GEJ_VERIFY(r); + if (rzr != NULL) SECP256K1_FE_VERIFY(rzr); } -static void rustsecp256k1_v0_9_2_gej_add_zinv_var(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_ge *b, const rustsecp256k1_v0_9_2_fe *bzinv) { +static void rustsecp256k1_v0_10_0_gej_add_zinv_var(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_ge *b, const rustsecp256k1_v0_10_0_fe *bzinv) { /* Operations: 9 mul, 3 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */ - rustsecp256k1_v0_9_2_fe az, z12, u1, u2, s1, s2, h, i, h2, h3, t; - rustsecp256k1_v0_9_2_gej_verify(a); - rustsecp256k1_v0_9_2_ge_verify(b); - rustsecp256k1_v0_9_2_fe_verify(bzinv); + rustsecp256k1_v0_10_0_fe az, z12, u1, u2, s1, s2, h, i, h2, h3, t; + SECP256K1_GEJ_VERIFY(a); + SECP256K1_GE_VERIFY(b); + SECP256K1_FE_VERIFY(bzinv); if (a->infinity) { - rustsecp256k1_v0_9_2_fe bzinv2, bzinv3; + rustsecp256k1_v0_10_0_fe bzinv2, bzinv3; r->infinity = b->infinity; - rustsecp256k1_v0_9_2_fe_sqr(&bzinv2, bzinv); - rustsecp256k1_v0_9_2_fe_mul(&bzinv3, &bzinv2, bzinv); - rustsecp256k1_v0_9_2_fe_mul(&r->x, &b->x, &bzinv2); - rustsecp256k1_v0_9_2_fe_mul(&r->y, &b->y, &bzinv3); - 
rustsecp256k1_v0_9_2_fe_set_int(&r->z, 1); - rustsecp256k1_v0_9_2_gej_verify(r); + rustsecp256k1_v0_10_0_fe_sqr(&bzinv2, bzinv); + rustsecp256k1_v0_10_0_fe_mul(&bzinv3, &bzinv2, bzinv); + rustsecp256k1_v0_10_0_fe_mul(&r->x, &b->x, &bzinv2); + rustsecp256k1_v0_10_0_fe_mul(&r->y, &b->y, &bzinv3); + rustsecp256k1_v0_10_0_fe_set_int(&r->z, 1); + SECP256K1_GEJ_VERIFY(r); return; } if (b->infinity) { @@ -627,53 +650,53 @@ static void rustsecp256k1_v0_9_2_gej_add_zinv_var(rustsecp256k1_v0_9_2_gej *r, c * The variable az below holds the modified Z coordinate for a, which is used * for the computation of rx and ry, but not for rz. */ - rustsecp256k1_v0_9_2_fe_mul(&az, &a->z, bzinv); + rustsecp256k1_v0_10_0_fe_mul(&az, &a->z, bzinv); - rustsecp256k1_v0_9_2_fe_sqr(&z12, &az); + rustsecp256k1_v0_10_0_fe_sqr(&z12, &az); u1 = a->x; - rustsecp256k1_v0_9_2_fe_mul(&u2, &b->x, &z12); + rustsecp256k1_v0_10_0_fe_mul(&u2, &b->x, &z12); s1 = a->y; - rustsecp256k1_v0_9_2_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_9_2_fe_mul(&s2, &s2, &az); - rustsecp256k1_v0_9_2_fe_negate(&h, &u1, SECP256K1_GEJ_X_MAGNITUDE_MAX); rustsecp256k1_v0_9_2_fe_add(&h, &u2); - rustsecp256k1_v0_9_2_fe_negate(&i, &s2, 1); rustsecp256k1_v0_9_2_fe_add(&i, &s1); - if (rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&h)) { - if (rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&i)) { - rustsecp256k1_v0_9_2_gej_double_var(r, a, NULL); + rustsecp256k1_v0_10_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_10_0_fe_mul(&s2, &s2, &az); + rustsecp256k1_v0_10_0_fe_negate(&h, &u1, SECP256K1_GEJ_X_MAGNITUDE_MAX); rustsecp256k1_v0_10_0_fe_add(&h, &u2); + rustsecp256k1_v0_10_0_fe_negate(&i, &s2, 1); rustsecp256k1_v0_10_0_fe_add(&i, &s1); + if (rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&h)) { + if (rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&i)) { + rustsecp256k1_v0_10_0_gej_double_var(r, a, NULL); } else { - rustsecp256k1_v0_9_2_gej_set_infinity(r); + rustsecp256k1_v0_10_0_gej_set_infinity(r); } return; } r->infinity = 0; - rustsecp256k1_v0_9_2_fe_mul(&r->z, &a->z, &h); + rustsecp256k1_v0_10_0_fe_mul(&r->z, &a->z, &h); - rustsecp256k1_v0_9_2_fe_sqr(&h2, &h); - rustsecp256k1_v0_9_2_fe_negate(&h2, &h2, 1); - rustsecp256k1_v0_9_2_fe_mul(&h3, &h2, &h); - rustsecp256k1_v0_9_2_fe_mul(&t, &u1, &h2); + rustsecp256k1_v0_10_0_fe_sqr(&h2, &h); + rustsecp256k1_v0_10_0_fe_negate(&h2, &h2, 1); + rustsecp256k1_v0_10_0_fe_mul(&h3, &h2, &h); + rustsecp256k1_v0_10_0_fe_mul(&t, &u1, &h2); - rustsecp256k1_v0_9_2_fe_sqr(&r->x, &i); - rustsecp256k1_v0_9_2_fe_add(&r->x, &h3); - rustsecp256k1_v0_9_2_fe_add(&r->x, &t); - rustsecp256k1_v0_9_2_fe_add(&r->x, &t); + rustsecp256k1_v0_10_0_fe_sqr(&r->x, &i); + rustsecp256k1_v0_10_0_fe_add(&r->x, &h3); + rustsecp256k1_v0_10_0_fe_add(&r->x, &t); + rustsecp256k1_v0_10_0_fe_add(&r->x, &t); - rustsecp256k1_v0_9_2_fe_add(&t, &r->x); - rustsecp256k1_v0_9_2_fe_mul(&r->y, &t, &i); - rustsecp256k1_v0_9_2_fe_mul(&h3, &h3, &s1); - rustsecp256k1_v0_9_2_fe_add(&r->y, &h3); + rustsecp256k1_v0_10_0_fe_add(&t, &r->x); + rustsecp256k1_v0_10_0_fe_mul(&r->y, &t, &i); + rustsecp256k1_v0_10_0_fe_mul(&h3, &h3, &s1); + rustsecp256k1_v0_10_0_fe_add(&r->y, &h3); - rustsecp256k1_v0_9_2_gej_verify(r); + SECP256K1_GEJ_VERIFY(r); } -static void rustsecp256k1_v0_9_2_gej_add_ge(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_ge *b) { +static void rustsecp256k1_v0_10_0_gej_add_ge(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_ge *b) { /* Operations: 7 mul, 5 sqr, 21 
add/cmov/half/mul_int/negate/normalizes_to_zero */ - rustsecp256k1_v0_9_2_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr; - rustsecp256k1_v0_9_2_fe m_alt, rr_alt; + rustsecp256k1_v0_10_0_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr; + rustsecp256k1_v0_10_0_fe m_alt, rr_alt; int degenerate; - rustsecp256k1_v0_9_2_gej_verify(a); - rustsecp256k1_v0_9_2_ge_verify(b); + SECP256K1_GEJ_VERIFY(a); + SECP256K1_GE_VERIFY(b); VERIFY_CHECK(!b->infinity); /* In: @@ -726,62 +749,62 @@ static void rustsecp256k1_v0_9_2_gej_add_ge(rustsecp256k1_v0_9_2_gej *r, const r * so this covers everything. */ - rustsecp256k1_v0_9_2_fe_sqr(&zz, &a->z); /* z = Z1^2 */ + rustsecp256k1_v0_10_0_fe_sqr(&zz, &a->z); /* z = Z1^2 */ u1 = a->x; /* u1 = U1 = X1*Z2^2 (GEJ_X_M) */ - rustsecp256k1_v0_9_2_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */ + rustsecp256k1_v0_10_0_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */ s1 = a->y; /* s1 = S1 = Y1*Z2^3 (GEJ_Y_M) */ - rustsecp256k1_v0_9_2_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */ - rustsecp256k1_v0_9_2_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */ - t = u1; rustsecp256k1_v0_9_2_fe_add(&t, &u2); /* t = T = U1+U2 (GEJ_X_M+1) */ - m = s1; rustsecp256k1_v0_9_2_fe_add(&m, &s2); /* m = M = S1+S2 (GEJ_Y_M+1) */ - rustsecp256k1_v0_9_2_fe_sqr(&rr, &t); /* rr = T^2 (1) */ - rustsecp256k1_v0_9_2_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 (2) */ - rustsecp256k1_v0_9_2_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (1) */ - rustsecp256k1_v0_9_2_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (2) */ + rustsecp256k1_v0_10_0_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */ + rustsecp256k1_v0_10_0_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */ + t = u1; rustsecp256k1_v0_10_0_fe_add(&t, &u2); /* t = T = U1+U2 (GEJ_X_M+1) */ + m = s1; rustsecp256k1_v0_10_0_fe_add(&m, &s2); /* m = M = S1+S2 (GEJ_Y_M+1) */ + rustsecp256k1_v0_10_0_fe_sqr(&rr, &t); /* rr = T^2 (1) */ + rustsecp256k1_v0_10_0_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 (2) */ + rustsecp256k1_v0_10_0_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (1) */ + rustsecp256k1_v0_10_0_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (2) */ /* If lambda = R/M = R/0 we have a problem (except in the "trivial" * case that Z = z1z2 = 0, and this is special-cased later on). */ - degenerate = rustsecp256k1_v0_9_2_fe_normalizes_to_zero(&m); + degenerate = rustsecp256k1_v0_10_0_fe_normalizes_to_zero(&m); /* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2. * This means either x1 == beta*x2 or beta*x1 == x2, where beta is * a nontrivial cube root of one. In either case, an alternate * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2), * so we set R/M equal to this. */ rr_alt = s1; - rustsecp256k1_v0_9_2_fe_mul_int(&rr_alt, 2); /* rr_alt = Y1*Z2^3 - Y2*Z1^3 (GEJ_Y_M*2) */ - rustsecp256k1_v0_9_2_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 (GEJ_X_M+2) */ + rustsecp256k1_v0_10_0_fe_mul_int(&rr_alt, 2); /* rr_alt = Y1*Z2^3 - Y2*Z1^3 (GEJ_Y_M*2) */ + rustsecp256k1_v0_10_0_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 (GEJ_X_M+2) */ - rustsecp256k1_v0_9_2_fe_cmov(&rr_alt, &rr, !degenerate); /* rr_alt (GEJ_Y_M*2) */ - rustsecp256k1_v0_9_2_fe_cmov(&m_alt, &m, !degenerate); /* m_alt (GEJ_X_M+2) */ + rustsecp256k1_v0_10_0_fe_cmov(&rr_alt, &rr, !degenerate); /* rr_alt (GEJ_Y_M*2) */ + rustsecp256k1_v0_10_0_fe_cmov(&m_alt, &m, !degenerate); /* m_alt (GEJ_X_M+2) */ /* Now Ralt / Malt = lambda and is guaranteed not to be Ralt / 0. 
* From here on out Ralt and Malt represent the numerator * and denominator of lambda; R and M represent the explicit * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */ - rustsecp256k1_v0_9_2_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */ - rustsecp256k1_v0_9_2_fe_negate(&q, &t, + rustsecp256k1_v0_10_0_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */ + rustsecp256k1_v0_10_0_fe_negate(&q, &t, SECP256K1_GEJ_X_MAGNITUDE_MAX + 1); /* q = -T (GEJ_X_M+2) */ - rustsecp256k1_v0_9_2_fe_mul(&q, &q, &n); /* q = Q = -T*Malt^2 (1) */ + rustsecp256k1_v0_10_0_fe_mul(&q, &q, &n); /* q = Q = -T*Malt^2 (1) */ /* These two lines use the observation that either M == Malt or M == 0, * so M^3 * Malt is either Malt^4 (which is computed by squaring), or * zero (which is "computed" by cmov). So the cost is one squaring * versus two multiplications. */ - rustsecp256k1_v0_9_2_fe_sqr(&n, &n); /* n = Malt^4 (1) */ - rustsecp256k1_v0_9_2_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (GEJ_Y_M+1) */ - rustsecp256k1_v0_9_2_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */ - rustsecp256k1_v0_9_2_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Z3 = Malt*Z (1) */ - rustsecp256k1_v0_9_2_fe_add(&t, &q); /* t = Ralt^2 + Q (2) */ + rustsecp256k1_v0_10_0_fe_sqr(&n, &n); /* n = Malt^4 (1) */ + rustsecp256k1_v0_10_0_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (GEJ_Y_M+1) */ + rustsecp256k1_v0_10_0_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */ + rustsecp256k1_v0_10_0_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Z3 = Malt*Z (1) */ + rustsecp256k1_v0_10_0_fe_add(&t, &q); /* t = Ralt^2 + Q (2) */ r->x = t; /* r->x = X3 = Ralt^2 + Q (2) */ - rustsecp256k1_v0_9_2_fe_mul_int(&t, 2); /* t = 2*X3 (4) */ - rustsecp256k1_v0_9_2_fe_add(&t, &q); /* t = 2*X3 + Q (5) */ - rustsecp256k1_v0_9_2_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*X3 + Q) (1) */ - rustsecp256k1_v0_9_2_fe_add(&t, &n); /* t = Ralt*(2*X3 + Q) + M^3*Malt (GEJ_Y_M+2) */ - rustsecp256k1_v0_9_2_fe_negate(&r->y, &t, + rustsecp256k1_v0_10_0_fe_mul_int(&t, 2); /* t = 2*X3 (4) */ + rustsecp256k1_v0_10_0_fe_add(&t, &q); /* t = 2*X3 + Q (5) */ + rustsecp256k1_v0_10_0_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*X3 + Q) (1) */ + rustsecp256k1_v0_10_0_fe_add(&t, &n); /* t = Ralt*(2*X3 + Q) + M^3*Malt (GEJ_Y_M+2) */ + rustsecp256k1_v0_10_0_fe_negate(&r->y, &t, SECP256K1_GEJ_Y_MAGNITUDE_MAX + 2); /* r->y = -(Ralt*(2*X3 + Q) + M^3*Malt) (GEJ_Y_M+3) */ - rustsecp256k1_v0_9_2_fe_half(&r->y); /* r->y = Y3 = -(Ralt*(2*X3 + Q) + M^3*Malt)/2 ((GEJ_Y_M+3)/2 + 1) */ + rustsecp256k1_v0_10_0_fe_half(&r->y); /* r->y = Y3 = -(Ralt*(2*X3 + Q) + M^3*Malt)/2 ((GEJ_Y_M+3)/2 + 1) */ /* In case a->infinity == 1, replace r with (b->x, b->y, 1). */ - rustsecp256k1_v0_9_2_fe_cmov(&r->x, &b->x, a->infinity); - rustsecp256k1_v0_9_2_fe_cmov(&r->y, &b->y, a->infinity); - rustsecp256k1_v0_9_2_fe_cmov(&r->z, &rustsecp256k1_v0_9_2_fe_one, a->infinity); + rustsecp256k1_v0_10_0_fe_cmov(&r->x, &b->x, a->infinity); + rustsecp256k1_v0_10_0_fe_cmov(&r->y, &b->y, a->infinity); + rustsecp256k1_v0_10_0_fe_cmov(&r->z, &rustsecp256k1_v0_10_0_fe_one, a->infinity); /* Set r->infinity if r->z is 0. * @@ -799,93 +822,91 @@ static void rustsecp256k1_v0_9_2_gej_add_ge(rustsecp256k1_v0_9_2_gej *r, const r * In this case, we can't have a = -b. * We have degenerate = false, r->z = (y1 + y2) * Z. * Then r->infinity = ((y1 + y2)Z == 0) = (y1 == -y2) = false. 
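gej_add_ge stays constant-time by replacing branches with conditional moves: fe_cmov selects between two field elements under a mask derived from the flag, and the infinity flags are merged branchlessly as well. A minimal sketch of the underlying select idiom on a plain machine word (illustrative only; cmov_u32 is not part of this library):

    #include <stdint.h>

    /* Copy a into *r iff flag is 1; flag must be 0 or 1. No data-dependent
     * branch is taken, so timing does not leak the value of flag. */
    static void cmov_u32(uint32_t *r, uint32_t a, int flag) {
        uint32_t mask = (uint32_t)0 - (uint32_t)flag; /* 0x0 or 0xFFFFFFFF */
        *r = (*r & ~mask) | (a & mask);
    }

The `r->infinity ^= (r->infinity ^ a->infinity) & flag` form seen in gej_cmov further down is the same idea specialised to 0/1-valued flags.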
*/ - r->infinity = rustsecp256k1_v0_9_2_fe_normalizes_to_zero(&r->z); + r->infinity = rustsecp256k1_v0_10_0_fe_normalizes_to_zero(&r->z); - rustsecp256k1_v0_9_2_gej_verify(r); + SECP256K1_GEJ_VERIFY(r); } -static void rustsecp256k1_v0_9_2_gej_rescale(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_fe *s) { +static void rustsecp256k1_v0_10_0_gej_rescale(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_fe *s) { /* Operations: 4 mul, 1 sqr */ - rustsecp256k1_v0_9_2_fe zz; - rustsecp256k1_v0_9_2_gej_verify(r); - rustsecp256k1_v0_9_2_fe_verify(s); -#ifdef VERIFY - VERIFY_CHECK(!rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(s)); -#endif + rustsecp256k1_v0_10_0_fe zz; + SECP256K1_GEJ_VERIFY(r); + SECP256K1_FE_VERIFY(s); + VERIFY_CHECK(!rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(s)); - rustsecp256k1_v0_9_2_fe_sqr(&zz, s); - rustsecp256k1_v0_9_2_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */ - rustsecp256k1_v0_9_2_fe_mul(&r->y, &r->y, &zz); - rustsecp256k1_v0_9_2_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */ - rustsecp256k1_v0_9_2_fe_mul(&r->z, &r->z, s); /* r->z *= s */ + rustsecp256k1_v0_10_0_fe_sqr(&zz, s); + rustsecp256k1_v0_10_0_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */ + rustsecp256k1_v0_10_0_fe_mul(&r->y, &r->y, &zz); + rustsecp256k1_v0_10_0_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */ + rustsecp256k1_v0_10_0_fe_mul(&r->z, &r->z, s); /* r->z *= s */ - rustsecp256k1_v0_9_2_gej_verify(r); + SECP256K1_GEJ_VERIFY(r); } -static void rustsecp256k1_v0_9_2_ge_to_storage(rustsecp256k1_v0_9_2_ge_storage *r, const rustsecp256k1_v0_9_2_ge *a) { - rustsecp256k1_v0_9_2_fe x, y; - rustsecp256k1_v0_9_2_ge_verify(a); +static void rustsecp256k1_v0_10_0_ge_to_storage(rustsecp256k1_v0_10_0_ge_storage *r, const rustsecp256k1_v0_10_0_ge *a) { + rustsecp256k1_v0_10_0_fe x, y; + SECP256K1_GE_VERIFY(a); VERIFY_CHECK(!a->infinity); x = a->x; - rustsecp256k1_v0_9_2_fe_normalize(&x); + rustsecp256k1_v0_10_0_fe_normalize(&x); y = a->y; - rustsecp256k1_v0_9_2_fe_normalize(&y); - rustsecp256k1_v0_9_2_fe_to_storage(&r->x, &x); - rustsecp256k1_v0_9_2_fe_to_storage(&r->y, &y); + rustsecp256k1_v0_10_0_fe_normalize(&y); + rustsecp256k1_v0_10_0_fe_to_storage(&r->x, &x); + rustsecp256k1_v0_10_0_fe_to_storage(&r->y, &y); } -static void rustsecp256k1_v0_9_2_ge_from_storage(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_ge_storage *a) { - rustsecp256k1_v0_9_2_fe_from_storage(&r->x, &a->x); - rustsecp256k1_v0_9_2_fe_from_storage(&r->y, &a->y); +static void rustsecp256k1_v0_10_0_ge_from_storage(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_ge_storage *a) { + rustsecp256k1_v0_10_0_fe_from_storage(&r->x, &a->x); + rustsecp256k1_v0_10_0_fe_from_storage(&r->y, &a->y); r->infinity = 0; - rustsecp256k1_v0_9_2_ge_verify(r); + SECP256K1_GE_VERIFY(r); } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_gej_cmov(rustsecp256k1_v0_9_2_gej *r, const rustsecp256k1_v0_9_2_gej *a, int flag) { - rustsecp256k1_v0_9_2_gej_verify(r); - rustsecp256k1_v0_9_2_gej_verify(a); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_gej_cmov(rustsecp256k1_v0_10_0_gej *r, const rustsecp256k1_v0_10_0_gej *a, int flag) { + SECP256K1_GEJ_VERIFY(r); + SECP256K1_GEJ_VERIFY(a); - rustsecp256k1_v0_9_2_fe_cmov(&r->x, &a->x, flag); - rustsecp256k1_v0_9_2_fe_cmov(&r->y, &a->y, flag); - rustsecp256k1_v0_9_2_fe_cmov(&r->z, &a->z, flag); + rustsecp256k1_v0_10_0_fe_cmov(&r->x, &a->x, flag); + rustsecp256k1_v0_10_0_fe_cmov(&r->y, &a->y, flag); + rustsecp256k1_v0_10_0_fe_cmov(&r->z, &a->z, flag); r->infinity ^= (r->infinity ^ a->infinity) & 
flag; - rustsecp256k1_v0_9_2_gej_verify(r); + SECP256K1_GEJ_VERIFY(r); } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_ge_storage_cmov(rustsecp256k1_v0_9_2_ge_storage *r, const rustsecp256k1_v0_9_2_ge_storage *a, int flag) { - rustsecp256k1_v0_9_2_fe_storage_cmov(&r->x, &a->x, flag); - rustsecp256k1_v0_9_2_fe_storage_cmov(&r->y, &a->y, flag); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_ge_storage_cmov(rustsecp256k1_v0_10_0_ge_storage *r, const rustsecp256k1_v0_10_0_ge_storage *a, int flag) { + rustsecp256k1_v0_10_0_fe_storage_cmov(&r->x, &a->x, flag); + rustsecp256k1_v0_10_0_fe_storage_cmov(&r->y, &a->y, flag); } -static void rustsecp256k1_v0_9_2_ge_mul_lambda(rustsecp256k1_v0_9_2_ge *r, const rustsecp256k1_v0_9_2_ge *a) { - rustsecp256k1_v0_9_2_ge_verify(a); +static void rustsecp256k1_v0_10_0_ge_mul_lambda(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_ge *a) { + SECP256K1_GE_VERIFY(a); *r = *a; - rustsecp256k1_v0_9_2_fe_mul(&r->x, &r->x, &rustsecp256k1_v0_9_2_const_beta); + rustsecp256k1_v0_10_0_fe_mul(&r->x, &r->x, &rustsecp256k1_v0_10_0_const_beta); - rustsecp256k1_v0_9_2_ge_verify(r); + SECP256K1_GE_VERIFY(r); } -static int rustsecp256k1_v0_9_2_ge_is_in_correct_subgroup(const rustsecp256k1_v0_9_2_ge* ge) { +static int rustsecp256k1_v0_10_0_ge_is_in_correct_subgroup(const rustsecp256k1_v0_10_0_ge* ge) { #ifdef EXHAUSTIVE_TEST_ORDER - rustsecp256k1_v0_9_2_gej out; + rustsecp256k1_v0_10_0_gej out; int i; - rustsecp256k1_v0_9_2_ge_verify(ge); + SECP256K1_GE_VERIFY(ge); /* A very simple EC multiplication ladder that avoids a dependency on ecmult. */ - rustsecp256k1_v0_9_2_gej_set_infinity(&out); + rustsecp256k1_v0_10_0_gej_set_infinity(&out); for (i = 0; i < 32; ++i) { - rustsecp256k1_v0_9_2_gej_double_var(&out, &out, NULL); + rustsecp256k1_v0_10_0_gej_double_var(&out, &out, NULL); if ((((uint32_t)EXHAUSTIVE_TEST_ORDER) >> (31 - i)) & 1) { - rustsecp256k1_v0_9_2_gej_add_ge_var(&out, &out, ge, NULL); + rustsecp256k1_v0_10_0_gej_add_ge_var(&out, &out, ge, NULL); } } - return rustsecp256k1_v0_9_2_gej_is_infinity(&out); + return rustsecp256k1_v0_10_0_gej_is_infinity(&out); #else - rustsecp256k1_v0_9_2_ge_verify(ge); + SECP256K1_GE_VERIFY(ge); (void)ge; /* The real secp256k1 group has cofactor 1, so the subgroup is the entire curve. */ @@ -893,32 +914,31 @@ static int rustsecp256k1_v0_9_2_ge_is_in_correct_subgroup(const rustsecp256k1_v0 #endif } -static int rustsecp256k1_v0_9_2_ge_x_on_curve_var(const rustsecp256k1_v0_9_2_fe *x) { - rustsecp256k1_v0_9_2_fe c; - rustsecp256k1_v0_9_2_fe_sqr(&c, x); - rustsecp256k1_v0_9_2_fe_mul(&c, &c, x); - rustsecp256k1_v0_9_2_fe_add_int(&c, SECP256K1_B); - return rustsecp256k1_v0_9_2_fe_is_square_var(&c); +static int rustsecp256k1_v0_10_0_ge_x_on_curve_var(const rustsecp256k1_v0_10_0_fe *x) { + rustsecp256k1_v0_10_0_fe c; + rustsecp256k1_v0_10_0_fe_sqr(&c, x); + rustsecp256k1_v0_10_0_fe_mul(&c, &c, x); + rustsecp256k1_v0_10_0_fe_add_int(&c, SECP256K1_B); + return rustsecp256k1_v0_10_0_fe_is_square_var(&c); } -static int rustsecp256k1_v0_9_2_ge_x_frac_on_curve_var(const rustsecp256k1_v0_9_2_fe *xn, const rustsecp256k1_v0_9_2_fe *xd) { +static int rustsecp256k1_v0_10_0_ge_x_frac_on_curve_var(const rustsecp256k1_v0_10_0_fe *xn, const rustsecp256k1_v0_10_0_fe *xd) { /* We want to determine whether (xn/xd) is on the curve. * * (xn/xd)^3 + 7 is square <=> xd*xn^3 + 7*xd^4 is square (multiplying by xd^4, a square). 
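The comment above compresses a one-line derivation; spelled out:

    (xn/xd)^3 + 7 = (xd*xn^3 + 7*xd^4) / xd^4,

and since xd != 0, xd^4 is a nonzero square, so the left-hand side is a square exactly when the numerator xd*xn^3 + 7*xd^4 is. That numerator is the value r accumulated in the function body before the final is_square check.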
*/ - rustsecp256k1_v0_9_2_fe r, t; -#ifdef VERIFY - VERIFY_CHECK(!rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(xd)); -#endif - rustsecp256k1_v0_9_2_fe_mul(&r, xd, xn); /* r = xd*xn */ - rustsecp256k1_v0_9_2_fe_sqr(&t, xn); /* t = xn^2 */ - rustsecp256k1_v0_9_2_fe_mul(&r, &r, &t); /* r = xd*xn^3 */ - rustsecp256k1_v0_9_2_fe_sqr(&t, xd); /* t = xd^2 */ - rustsecp256k1_v0_9_2_fe_sqr(&t, &t); /* t = xd^4 */ + rustsecp256k1_v0_10_0_fe r, t; + VERIFY_CHECK(!rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(xd)); + + rustsecp256k1_v0_10_0_fe_mul(&r, xd, xn); /* r = xd*xn */ + rustsecp256k1_v0_10_0_fe_sqr(&t, xn); /* t = xn^2 */ + rustsecp256k1_v0_10_0_fe_mul(&r, &r, &t); /* r = xd*xn^3 */ + rustsecp256k1_v0_10_0_fe_sqr(&t, xd); /* t = xd^2 */ + rustsecp256k1_v0_10_0_fe_sqr(&t, &t); /* t = xd^4 */ VERIFY_CHECK(SECP256K1_B <= 31); - rustsecp256k1_v0_9_2_fe_mul_int(&t, SECP256K1_B); /* t = 7*xd^4 */ - rustsecp256k1_v0_9_2_fe_add(&r, &t); /* r = xd*xn^3 + 7*xd^4 */ - return rustsecp256k1_v0_9_2_fe_is_square_var(&r); + rustsecp256k1_v0_10_0_fe_mul_int(&t, SECP256K1_B); /* t = 7*xd^4 */ + rustsecp256k1_v0_10_0_fe_add(&r, &t); /* r = xd*xn^3 + 7*xd^4 */ + return rustsecp256k1_v0_10_0_fe_is_square_var(&r); } #endif /* SECP256K1_GROUP_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/hash.h b/secp256k1-sys/depend/secp256k1/src/hash.h index 43bd893a2..cdf439eeb 100644 --- a/secp256k1-sys/depend/secp256k1/src/hash.h +++ b/secp256k1-sys/depend/secp256k1/src/hash.h @@ -14,28 +14,28 @@ typedef struct { uint32_t s[8]; unsigned char buf[64]; uint64_t bytes; -} rustsecp256k1_v0_9_2_sha256; +} rustsecp256k1_v0_10_0_sha256; -static void rustsecp256k1_v0_9_2_sha256_initialize(rustsecp256k1_v0_9_2_sha256 *hash); -static void rustsecp256k1_v0_9_2_sha256_write(rustsecp256k1_v0_9_2_sha256 *hash, const unsigned char *data, size_t size); -static void rustsecp256k1_v0_9_2_sha256_finalize(rustsecp256k1_v0_9_2_sha256 *hash, unsigned char *out32); +static void rustsecp256k1_v0_10_0_sha256_initialize(rustsecp256k1_v0_10_0_sha256 *hash); +static void rustsecp256k1_v0_10_0_sha256_write(rustsecp256k1_v0_10_0_sha256 *hash, const unsigned char *data, size_t size); +static void rustsecp256k1_v0_10_0_sha256_finalize(rustsecp256k1_v0_10_0_sha256 *hash, unsigned char *out32); typedef struct { - rustsecp256k1_v0_9_2_sha256 inner, outer; -} rustsecp256k1_v0_9_2_hmac_sha256; + rustsecp256k1_v0_10_0_sha256 inner, outer; +} rustsecp256k1_v0_10_0_hmac_sha256; -static void rustsecp256k1_v0_9_2_hmac_sha256_initialize(rustsecp256k1_v0_9_2_hmac_sha256 *hash, const unsigned char *key, size_t size); -static void rustsecp256k1_v0_9_2_hmac_sha256_write(rustsecp256k1_v0_9_2_hmac_sha256 *hash, const unsigned char *data, size_t size); -static void rustsecp256k1_v0_9_2_hmac_sha256_finalize(rustsecp256k1_v0_9_2_hmac_sha256 *hash, unsigned char *out32); +static void rustsecp256k1_v0_10_0_hmac_sha256_initialize(rustsecp256k1_v0_10_0_hmac_sha256 *hash, const unsigned char *key, size_t size); +static void rustsecp256k1_v0_10_0_hmac_sha256_write(rustsecp256k1_v0_10_0_hmac_sha256 *hash, const unsigned char *data, size_t size); +static void rustsecp256k1_v0_10_0_hmac_sha256_finalize(rustsecp256k1_v0_10_0_hmac_sha256 *hash, unsigned char *out32); typedef struct { unsigned char v[32]; unsigned char k[32]; int retry; -} rustsecp256k1_v0_9_2_rfc6979_hmac_sha256; +} rustsecp256k1_v0_10_0_rfc6979_hmac_sha256; -static void rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_9_2_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t 
keylen); -static void rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_9_2_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen); -static void rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_9_2_rfc6979_hmac_sha256 *rng); +static void rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_10_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen); +static void rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_10_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen); +static void rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_10_0_rfc6979_hmac_sha256 *rng); #endif /* SECP256K1_HASH_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/hash_impl.h b/secp256k1-sys/depend/secp256k1/src/hash_impl.h index 64b6e180c..8a1000de1 100644 --- a/secp256k1-sys/depend/secp256k1/src/hash_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/hash_impl.h @@ -28,7 +28,7 @@ (h) = t1 + t2; \ } while(0) -static void rustsecp256k1_v0_9_2_sha256_initialize(rustsecp256k1_v0_9_2_sha256 *hash) { +static void rustsecp256k1_v0_10_0_sha256_initialize(rustsecp256k1_v0_10_0_sha256 *hash) { hash->s[0] = 0x6a09e667ul; hash->s[1] = 0xbb67ae85ul; hash->s[2] = 0x3c6ef372ul; @@ -41,26 +41,26 @@ static void rustsecp256k1_v0_9_2_sha256_initialize(rustsecp256k1_v0_9_2_sha256 * } /** Perform one SHA-256 transformation, processing 16 big endian 32-bit words. */ -static void rustsecp256k1_v0_9_2_sha256_transform(uint32_t* s, const unsigned char* buf) { +static void rustsecp256k1_v0_10_0_sha256_transform(uint32_t* s, const unsigned char* buf) { uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7]; uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15; - Round(a, b, c, d, e, f, g, h, 0x428a2f98, w0 = rustsecp256k1_v0_9_2_read_be32(&buf[0])); - Round(h, a, b, c, d, e, f, g, 0x71374491, w1 = rustsecp256k1_v0_9_2_read_be32(&buf[4])); - Round(g, h, a, b, c, d, e, f, 0xb5c0fbcf, w2 = rustsecp256k1_v0_9_2_read_be32(&buf[8])); - Round(f, g, h, a, b, c, d, e, 0xe9b5dba5, w3 = rustsecp256k1_v0_9_2_read_be32(&buf[12])); - Round(e, f, g, h, a, b, c, d, 0x3956c25b, w4 = rustsecp256k1_v0_9_2_read_be32(&buf[16])); - Round(d, e, f, g, h, a, b, c, 0x59f111f1, w5 = rustsecp256k1_v0_9_2_read_be32(&buf[20])); - Round(c, d, e, f, g, h, a, b, 0x923f82a4, w6 = rustsecp256k1_v0_9_2_read_be32(&buf[24])); - Round(b, c, d, e, f, g, h, a, 0xab1c5ed5, w7 = rustsecp256k1_v0_9_2_read_be32(&buf[28])); - Round(a, b, c, d, e, f, g, h, 0xd807aa98, w8 = rustsecp256k1_v0_9_2_read_be32(&buf[32])); - Round(h, a, b, c, d, e, f, g, 0x12835b01, w9 = rustsecp256k1_v0_9_2_read_be32(&buf[36])); - Round(g, h, a, b, c, d, e, f, 0x243185be, w10 = rustsecp256k1_v0_9_2_read_be32(&buf[40])); - Round(f, g, h, a, b, c, d, e, 0x550c7dc3, w11 = rustsecp256k1_v0_9_2_read_be32(&buf[44])); - Round(e, f, g, h, a, b, c, d, 0x72be5d74, w12 = rustsecp256k1_v0_9_2_read_be32(&buf[48])); - Round(d, e, f, g, h, a, b, c, 0x80deb1fe, w13 = rustsecp256k1_v0_9_2_read_be32(&buf[52])); - Round(c, d, e, f, g, h, a, b, 0x9bdc06a7, w14 = rustsecp256k1_v0_9_2_read_be32(&buf[56])); - Round(b, c, d, e, f, g, h, a, 0xc19bf174, w15 = rustsecp256k1_v0_9_2_read_be32(&buf[60])); + Round(a, b, c, d, e, f, g, h, 0x428a2f98, w0 = rustsecp256k1_v0_10_0_read_be32(&buf[0])); + Round(h, a, b, c, d, e, f, g, 0x71374491, w1 = rustsecp256k1_v0_10_0_read_be32(&buf[4])); + Round(g, h, a, b, c, d, e, f, 0xb5c0fbcf, w2 = 
rustsecp256k1_v0_10_0_read_be32(&buf[8])); + Round(f, g, h, a, b, c, d, e, 0xe9b5dba5, w3 = rustsecp256k1_v0_10_0_read_be32(&buf[12])); + Round(e, f, g, h, a, b, c, d, 0x3956c25b, w4 = rustsecp256k1_v0_10_0_read_be32(&buf[16])); + Round(d, e, f, g, h, a, b, c, 0x59f111f1, w5 = rustsecp256k1_v0_10_0_read_be32(&buf[20])); + Round(c, d, e, f, g, h, a, b, 0x923f82a4, w6 = rustsecp256k1_v0_10_0_read_be32(&buf[24])); + Round(b, c, d, e, f, g, h, a, 0xab1c5ed5, w7 = rustsecp256k1_v0_10_0_read_be32(&buf[28])); + Round(a, b, c, d, e, f, g, h, 0xd807aa98, w8 = rustsecp256k1_v0_10_0_read_be32(&buf[32])); + Round(h, a, b, c, d, e, f, g, 0x12835b01, w9 = rustsecp256k1_v0_10_0_read_be32(&buf[36])); + Round(g, h, a, b, c, d, e, f, 0x243185be, w10 = rustsecp256k1_v0_10_0_read_be32(&buf[40])); + Round(f, g, h, a, b, c, d, e, 0x550c7dc3, w11 = rustsecp256k1_v0_10_0_read_be32(&buf[44])); + Round(e, f, g, h, a, b, c, d, 0x72be5d74, w12 = rustsecp256k1_v0_10_0_read_be32(&buf[48])); + Round(d, e, f, g, h, a, b, c, 0x80deb1fe, w13 = rustsecp256k1_v0_10_0_read_be32(&buf[52])); + Round(c, d, e, f, g, h, a, b, 0x9bdc06a7, w14 = rustsecp256k1_v0_10_0_read_be32(&buf[56])); + Round(b, c, d, e, f, g, h, a, 0xc19bf174, w15 = rustsecp256k1_v0_10_0_read_be32(&buf[60])); Round(a, b, c, d, e, f, g, h, 0xe49b69c1, w0 += sigma1(w14) + w9 + sigma0(w1)); Round(h, a, b, c, d, e, f, g, 0xefbe4786, w1 += sigma1(w15) + w10 + sigma0(w2)); @@ -123,7 +123,7 @@ static void rustsecp256k1_v0_9_2_sha256_transform(uint32_t* s, const unsigned ch s[7] += h; } -static void rustsecp256k1_v0_9_2_sha256_write(rustsecp256k1_v0_9_2_sha256 *hash, const unsigned char *data, size_t len) { +static void rustsecp256k1_v0_10_0_sha256_write(rustsecp256k1_v0_10_0_sha256 *hash, const unsigned char *data, size_t len) { size_t bufsize = hash->bytes & 0x3F; hash->bytes += len; VERIFY_CHECK(hash->bytes >= len); @@ -133,7 +133,7 @@ static void rustsecp256k1_v0_9_2_sha256_write(rustsecp256k1_v0_9_2_sha256 *hash, memcpy(hash->buf + bufsize, data, chunk_len); data += chunk_len; len -= chunk_len; - rustsecp256k1_v0_9_2_sha256_transform(hash->s, hash->buf); + rustsecp256k1_v0_10_0_sha256_transform(hash->s, hash->buf); bufsize = 0; } if (len) { @@ -142,78 +142,78 @@ static void rustsecp256k1_v0_9_2_sha256_write(rustsecp256k1_v0_9_2_sha256 *hash, } } -static void rustsecp256k1_v0_9_2_sha256_finalize(rustsecp256k1_v0_9_2_sha256 *hash, unsigned char *out32) { +static void rustsecp256k1_v0_10_0_sha256_finalize(rustsecp256k1_v0_10_0_sha256 *hash, unsigned char *out32) { static const unsigned char pad[64] = {0x80}; unsigned char sizedesc[8]; int i; /* The maximum message size of SHA256 is 2^64-1 bits. */ VERIFY_CHECK(hash->bytes < ((uint64_t)1 << 61)); - rustsecp256k1_v0_9_2_write_be32(&sizedesc[0], hash->bytes >> 29); - rustsecp256k1_v0_9_2_write_be32(&sizedesc[4], hash->bytes << 3); - rustsecp256k1_v0_9_2_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64)); - rustsecp256k1_v0_9_2_sha256_write(hash, sizedesc, 8); + rustsecp256k1_v0_10_0_write_be32(&sizedesc[0], hash->bytes >> 29); + rustsecp256k1_v0_10_0_write_be32(&sizedesc[4], hash->bytes << 3); + rustsecp256k1_v0_10_0_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64)); + rustsecp256k1_v0_10_0_sha256_write(hash, sizedesc, 8); for (i = 0; i < 8; i++) { - rustsecp256k1_v0_9_2_write_be32(&out32[4*i], hash->s[i]); + rustsecp256k1_v0_10_0_write_be32(&out32[4*i], hash->s[i]); hash->s[i] = 0; } } /* Initializes a sha256 struct and writes the 64 byte string * SHA256(tag)||SHA256(tag) into it. 
*/ -static void rustsecp256k1_v0_9_2_sha256_initialize_tagged(rustsecp256k1_v0_9_2_sha256 *hash, const unsigned char *tag, size_t taglen) { +static void rustsecp256k1_v0_10_0_sha256_initialize_tagged(rustsecp256k1_v0_10_0_sha256 *hash, const unsigned char *tag, size_t taglen) { unsigned char buf[32]; - rustsecp256k1_v0_9_2_sha256_initialize(hash); - rustsecp256k1_v0_9_2_sha256_write(hash, tag, taglen); - rustsecp256k1_v0_9_2_sha256_finalize(hash, buf); + rustsecp256k1_v0_10_0_sha256_initialize(hash); + rustsecp256k1_v0_10_0_sha256_write(hash, tag, taglen); + rustsecp256k1_v0_10_0_sha256_finalize(hash, buf); - rustsecp256k1_v0_9_2_sha256_initialize(hash); - rustsecp256k1_v0_9_2_sha256_write(hash, buf, 32); - rustsecp256k1_v0_9_2_sha256_write(hash, buf, 32); + rustsecp256k1_v0_10_0_sha256_initialize(hash); + rustsecp256k1_v0_10_0_sha256_write(hash, buf, 32); + rustsecp256k1_v0_10_0_sha256_write(hash, buf, 32); } -static void rustsecp256k1_v0_9_2_hmac_sha256_initialize(rustsecp256k1_v0_9_2_hmac_sha256 *hash, const unsigned char *key, size_t keylen) { +static void rustsecp256k1_v0_10_0_hmac_sha256_initialize(rustsecp256k1_v0_10_0_hmac_sha256 *hash, const unsigned char *key, size_t keylen) { size_t n; unsigned char rkey[64]; if (keylen <= sizeof(rkey)) { memcpy(rkey, key, keylen); memset(rkey + keylen, 0, sizeof(rkey) - keylen); } else { - rustsecp256k1_v0_9_2_sha256 sha256; - rustsecp256k1_v0_9_2_sha256_initialize(&sha256); - rustsecp256k1_v0_9_2_sha256_write(&sha256, key, keylen); - rustsecp256k1_v0_9_2_sha256_finalize(&sha256, rkey); + rustsecp256k1_v0_10_0_sha256 sha256; + rustsecp256k1_v0_10_0_sha256_initialize(&sha256); + rustsecp256k1_v0_10_0_sha256_write(&sha256, key, keylen); + rustsecp256k1_v0_10_0_sha256_finalize(&sha256, rkey); memset(rkey + 32, 0, 32); } - rustsecp256k1_v0_9_2_sha256_initialize(&hash->outer); + rustsecp256k1_v0_10_0_sha256_initialize(&hash->outer); for (n = 0; n < sizeof(rkey); n++) { rkey[n] ^= 0x5c; } - rustsecp256k1_v0_9_2_sha256_write(&hash->outer, rkey, sizeof(rkey)); + rustsecp256k1_v0_10_0_sha256_write(&hash->outer, rkey, sizeof(rkey)); - rustsecp256k1_v0_9_2_sha256_initialize(&hash->inner); + rustsecp256k1_v0_10_0_sha256_initialize(&hash->inner); for (n = 0; n < sizeof(rkey); n++) { rkey[n] ^= 0x5c ^ 0x36; } - rustsecp256k1_v0_9_2_sha256_write(&hash->inner, rkey, sizeof(rkey)); + rustsecp256k1_v0_10_0_sha256_write(&hash->inner, rkey, sizeof(rkey)); memset(rkey, 0, sizeof(rkey)); } -static void rustsecp256k1_v0_9_2_hmac_sha256_write(rustsecp256k1_v0_9_2_hmac_sha256 *hash, const unsigned char *data, size_t size) { - rustsecp256k1_v0_9_2_sha256_write(&hash->inner, data, size); +static void rustsecp256k1_v0_10_0_hmac_sha256_write(rustsecp256k1_v0_10_0_hmac_sha256 *hash, const unsigned char *data, size_t size) { + rustsecp256k1_v0_10_0_sha256_write(&hash->inner, data, size); } -static void rustsecp256k1_v0_9_2_hmac_sha256_finalize(rustsecp256k1_v0_9_2_hmac_sha256 *hash, unsigned char *out32) { +static void rustsecp256k1_v0_10_0_hmac_sha256_finalize(rustsecp256k1_v0_10_0_hmac_sha256 *hash, unsigned char *out32) { unsigned char temp[32]; - rustsecp256k1_v0_9_2_sha256_finalize(&hash->inner, temp); - rustsecp256k1_v0_9_2_sha256_write(&hash->outer, temp, 32); + rustsecp256k1_v0_10_0_sha256_finalize(&hash->inner, temp); + rustsecp256k1_v0_10_0_sha256_write(&hash->outer, temp, 32); memset(temp, 0, 32); - rustsecp256k1_v0_9_2_sha256_finalize(&hash->outer, out32); + rustsecp256k1_v0_10_0_sha256_finalize(&hash->outer, out32); } -static void 
rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_9_2_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) { - rustsecp256k1_v0_9_2_hmac_sha256 hmac; +static void rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_10_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) { + rustsecp256k1_v0_10_0_hmac_sha256 hmac; static const unsigned char zero[1] = {0x00}; static const unsigned char one[1] = {0x01}; @@ -221,47 +221,47 @@ static void rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0 memset(rng->k, 0x00, 32); /* RFC6979 3.2.c. */ /* RFC6979 3.2.d. */ - rustsecp256k1_v0_9_2_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hmac, zero, 1); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hmac, key, keylen); - rustsecp256k1_v0_9_2_hmac_sha256_finalize(&hmac, rng->k); - rustsecp256k1_v0_9_2_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_9_2_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_10_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hmac, zero, 1); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hmac, key, keylen); + rustsecp256k1_v0_10_0_hmac_sha256_finalize(&hmac, rng->k); + rustsecp256k1_v0_10_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_10_0_hmac_sha256_finalize(&hmac, rng->v); /* RFC6979 3.2.f. */ - rustsecp256k1_v0_9_2_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hmac, one, 1); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hmac, key, keylen); - rustsecp256k1_v0_9_2_hmac_sha256_finalize(&hmac, rng->k); - rustsecp256k1_v0_9_2_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_9_2_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_10_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hmac, one, 1); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hmac, key, keylen); + rustsecp256k1_v0_10_0_hmac_sha256_finalize(&hmac, rng->k); + rustsecp256k1_v0_10_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_10_0_hmac_sha256_finalize(&hmac, rng->v); rng->retry = 0; } -static void rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_9_2_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) { +static void rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_10_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) { /* RFC6979 3.2.h. 
*/ static const unsigned char zero[1] = {0x00}; if (rng->retry) { - rustsecp256k1_v0_9_2_hmac_sha256 hmac; - rustsecp256k1_v0_9_2_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hmac, zero, 1); - rustsecp256k1_v0_9_2_hmac_sha256_finalize(&hmac, rng->k); - rustsecp256k1_v0_9_2_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_9_2_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_10_0_hmac_sha256 hmac; + rustsecp256k1_v0_10_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hmac, zero, 1); + rustsecp256k1_v0_10_0_hmac_sha256_finalize(&hmac, rng->k); + rustsecp256k1_v0_10_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_10_0_hmac_sha256_finalize(&hmac, rng->v); } while (outlen > 0) { - rustsecp256k1_v0_9_2_hmac_sha256 hmac; + rustsecp256k1_v0_10_0_hmac_sha256 hmac; int now = outlen; - rustsecp256k1_v0_9_2_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_9_2_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_10_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_10_0_hmac_sha256_finalize(&hmac, rng->v); if (now > 32) { now = 32; } @@ -273,7 +273,7 @@ static void rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_9 rng->retry = 1; } -static void rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_9_2_rfc6979_hmac_sha256 *rng) { +static void rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_10_0_rfc6979_hmac_sha256 *rng) { memset(rng->k, 0, 32); memset(rng->v, 0, 32); rng->retry = 0; diff --git a/secp256k1-sys/depend/secp256k1/src/int128.h b/secp256k1-sys/depend/secp256k1/src/int128.h index 309ceba9c..fbdafa79d 100644 --- a/secp256k1-sys/depend/secp256k1/src/int128.h +++ b/secp256k1-sys/depend/secp256k1/src/int128.h @@ -13,77 +13,77 @@ # endif /* Construct an unsigned 128-bit value from a high and a low 64-bit value. */ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_load(rustsecp256k1_v0_9_2_uint128 *r, uint64_t hi, uint64_t lo); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_load(rustsecp256k1_v0_10_0_uint128 *r, uint64_t hi, uint64_t lo); /* Multiply two unsigned 64-bit values a and b and write the result to r. */ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_mul(rustsecp256k1_v0_9_2_uint128 *r, uint64_t a, uint64_t b); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_mul(rustsecp256k1_v0_10_0_uint128 *r, uint64_t a, uint64_t b); /* Multiply two unsigned 64-bit values a and b and add the result to r. * The final result is taken modulo 2^128. */ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_accum_mul(rustsecp256k1_v0_9_2_uint128 *r, uint64_t a, uint64_t b); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_accum_mul(rustsecp256k1_v0_10_0_uint128 *r, uint64_t a, uint64_t b); /* Add an unsigned 64-bit value a to r. * The final result is taken modulo 2^128. 
*/ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_accum_u64(rustsecp256k1_v0_9_2_uint128 *r, uint64_t a); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_accum_u64(rustsecp256k1_v0_10_0_uint128 *r, uint64_t a); /* Unsigned (logical) right shift. * Non-constant time in n. */ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_rshift(rustsecp256k1_v0_9_2_uint128 *r, unsigned int n); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_rshift(rustsecp256k1_v0_10_0_uint128 *r, unsigned int n); /* Return the low 64-bits of a 128-bit value as an unsigned 64-bit value. */ -static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_2_u128_to_u64(const rustsecp256k1_v0_9_2_uint128 *a); +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_10_0_u128_to_u64(const rustsecp256k1_v0_10_0_uint128 *a); /* Return the high 64-bits of a 128-bit value as an unsigned 64-bit value. */ -static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_2_u128_hi_u64(const rustsecp256k1_v0_9_2_uint128 *a); +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_10_0_u128_hi_u64(const rustsecp256k1_v0_10_0_uint128 *a); /* Write an unsigned 64-bit value to r. */ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_from_u64(rustsecp256k1_v0_9_2_uint128 *r, uint64_t a); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_from_u64(rustsecp256k1_v0_10_0_uint128 *r, uint64_t a); /* Tests if r is strictly less than 2^n. * n must be strictly less than 128. */ -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_u128_check_bits(const rustsecp256k1_v0_9_2_uint128 *r, unsigned int n); +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_u128_check_bits(const rustsecp256k1_v0_10_0_uint128 *r, unsigned int n); /* Construct a signed 128-bit value from a high and a low 64-bit value. */ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_load(rustsecp256k1_v0_9_2_int128 *r, int64_t hi, uint64_t lo); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_load(rustsecp256k1_v0_10_0_int128 *r, int64_t hi, uint64_t lo); /* Multiply two signed 64-bit values a and b and write the result to r. */ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_mul(rustsecp256k1_v0_9_2_int128 *r, int64_t a, int64_t b); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_mul(rustsecp256k1_v0_10_0_int128 *r, int64_t a, int64_t b); /* Multiply two signed 64-bit values a and b and add the result to r. * Overflow or underflow from the addition is undefined behaviour. */ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_accum_mul(rustsecp256k1_v0_9_2_int128 *r, int64_t a, int64_t b); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_accum_mul(rustsecp256k1_v0_10_0_int128 *r, int64_t a, int64_t b); /* Compute a*d - b*c from signed 64-bit values and write the result to r. */ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_det(rustsecp256k1_v0_9_2_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_det(rustsecp256k1_v0_10_0_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d); /* Signed (arithmetic) right shift. * Non-constant time in b. */ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_rshift(rustsecp256k1_v0_9_2_int128 *r, unsigned int b); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_rshift(rustsecp256k1_v0_10_0_int128 *r, unsigned int b); /* Return the input value modulo 2^64.
*/ -static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_2_i128_to_u64(const rustsecp256k1_v0_9_2_int128 *a); +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_10_0_i128_to_u64(const rustsecp256k1_v0_10_0_int128 *a); /* Return the value as a signed 64-bit value. * Requires the input to be between INT64_MIN and INT64_MAX. */ -static SECP256K1_INLINE int64_t rustsecp256k1_v0_9_2_i128_to_i64(const rustsecp256k1_v0_9_2_int128 *a); +static SECP256K1_INLINE int64_t rustsecp256k1_v0_10_0_i128_to_i64(const rustsecp256k1_v0_10_0_int128 *a); /* Write a signed 64-bit value to r. */ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_from_i64(rustsecp256k1_v0_9_2_int128 *r, int64_t a); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_from_i64(rustsecp256k1_v0_10_0_int128 *r, int64_t a); /* Compare two 128-bit values for equality. */ -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_i128_eq_var(const rustsecp256k1_v0_9_2_int128 *a, const rustsecp256k1_v0_9_2_int128 *b); +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_i128_eq_var(const rustsecp256k1_v0_10_0_int128 *a, const rustsecp256k1_v0_10_0_int128 *b); /* Tests if r is equal to sign*2^n (sign must be 1 or -1). * n must be strictly less than 127. */ -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_i128_check_pow2(const rustsecp256k1_v0_9_2_int128 *r, unsigned int n, int sign); +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_i128_check_pow2(const rustsecp256k1_v0_10_0_int128 *r, unsigned int n, int sign); #endif diff --git a/secp256k1-sys/depend/secp256k1/src/int128_native.h b/secp256k1-sys/depend/secp256k1/src/int128_native.h index 93cf08b5c..a6000b70f 100644 --- a/secp256k1-sys/depend/secp256k1/src/int128_native.h +++ b/secp256k1-sys/depend/secp256k1/src/int128_native.h @@ -13,7 +13,7 @@ SECP256K1_GNUC_EXT typedef __int128 int128_t; /* No (U)INT128_C macros because compilers providing __int128 do not support 128-bit literals. 
*/ #endif -typedef uint128_t rustsecp256k1_v0_9_2_uint128; -typedef int128_t rustsecp256k1_v0_9_2_int128; +typedef uint128_t rustsecp256k1_v0_10_0_uint128; +typedef int128_t rustsecp256k1_v0_10_0_int128; #endif diff --git a/secp256k1-sys/depend/secp256k1/src/int128_native_impl.h b/secp256k1-sys/depend/secp256k1/src/int128_native_impl.h index 8d2e804a7..8c3f604ec 100644 --- a/secp256k1-sys/depend/secp256k1/src/int128_native_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/int128_native_impl.h @@ -4,88 +4,88 @@ #include "int128.h" #include "util.h" -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_load(rustsecp256k1_v0_9_2_uint128 *r, uint64_t hi, uint64_t lo) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_load(rustsecp256k1_v0_10_0_uint128 *r, uint64_t hi, uint64_t lo) { *r = (((uint128_t)hi) << 64) + lo; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_mul(rustsecp256k1_v0_9_2_uint128 *r, uint64_t a, uint64_t b) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_mul(rustsecp256k1_v0_10_0_uint128 *r, uint64_t a, uint64_t b) { *r = (uint128_t)a * b; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_accum_mul(rustsecp256k1_v0_9_2_uint128 *r, uint64_t a, uint64_t b) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_accum_mul(rustsecp256k1_v0_10_0_uint128 *r, uint64_t a, uint64_t b) { *r += (uint128_t)a * b; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_accum_u64(rustsecp256k1_v0_9_2_uint128 *r, uint64_t a) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_accum_u64(rustsecp256k1_v0_10_0_uint128 *r, uint64_t a) { *r += a; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_rshift(rustsecp256k1_v0_9_2_uint128 *r, unsigned int n) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_rshift(rustsecp256k1_v0_10_0_uint128 *r, unsigned int n) { VERIFY_CHECK(n < 128); *r >>= n; } -static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_2_u128_to_u64(const rustsecp256k1_v0_9_2_uint128 *a) { +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_10_0_u128_to_u64(const rustsecp256k1_v0_10_0_uint128 *a) { return (uint64_t)(*a); } -static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_2_u128_hi_u64(const rustsecp256k1_v0_9_2_uint128 *a) { +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_10_0_u128_hi_u64(const rustsecp256k1_v0_10_0_uint128 *a) { return (uint64_t)(*a >> 64); } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_from_u64(rustsecp256k1_v0_9_2_uint128 *r, uint64_t a) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_from_u64(rustsecp256k1_v0_10_0_uint128 *r, uint64_t a) { *r = a; } -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_u128_check_bits(const rustsecp256k1_v0_9_2_uint128 *r, unsigned int n) { +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_u128_check_bits(const rustsecp256k1_v0_10_0_uint128 *r, unsigned int n) { VERIFY_CHECK(n < 128); return (*r >> n == 0); } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_load(rustsecp256k1_v0_9_2_int128 *r, int64_t hi, uint64_t lo) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_load(rustsecp256k1_v0_10_0_int128 *r, int64_t hi, uint64_t lo) { *r = (((uint128_t)(uint64_t)hi) << 64) + lo; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_mul(rustsecp256k1_v0_9_2_int128 *r, int64_t a, int64_t b) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_mul(rustsecp256k1_v0_10_0_int128 *r, int64_t a, int64_t b) { *r = (int128_t)a * b; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_accum_mul(rustsecp256k1_v0_9_2_int128 *r, 
int64_t a, int64_t b) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_accum_mul(rustsecp256k1_v0_10_0_int128 *r, int64_t a, int64_t b) { int128_t ab = (int128_t)a * b; VERIFY_CHECK(0 <= ab ? *r <= INT128_MAX - ab : INT128_MIN - ab <= *r); *r += ab; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_det(rustsecp256k1_v0_9_2_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_det(rustsecp256k1_v0_10_0_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d) { int128_t ad = (int128_t)a * d; int128_t bc = (int128_t)b * c; VERIFY_CHECK(0 <= bc ? INT128_MIN + bc <= ad : ad <= INT128_MAX + bc); *r = ad - bc; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_rshift(rustsecp256k1_v0_9_2_int128 *r, unsigned int n) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_rshift(rustsecp256k1_v0_10_0_int128 *r, unsigned int n) { VERIFY_CHECK(n < 128); *r >>= n; } -static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_2_i128_to_u64(const rustsecp256k1_v0_9_2_int128 *a) { +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_10_0_i128_to_u64(const rustsecp256k1_v0_10_0_int128 *a) { return (uint64_t)*a; } -static SECP256K1_INLINE int64_t rustsecp256k1_v0_9_2_i128_to_i64(const rustsecp256k1_v0_9_2_int128 *a) { +static SECP256K1_INLINE int64_t rustsecp256k1_v0_10_0_i128_to_i64(const rustsecp256k1_v0_10_0_int128 *a) { VERIFY_CHECK(INT64_MIN <= *a && *a <= INT64_MAX); return *a; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_from_i64(rustsecp256k1_v0_9_2_int128 *r, int64_t a) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_from_i64(rustsecp256k1_v0_10_0_int128 *r, int64_t a) { *r = a; } -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_i128_eq_var(const rustsecp256k1_v0_9_2_int128 *a, const rustsecp256k1_v0_9_2_int128 *b) { +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_i128_eq_var(const rustsecp256k1_v0_10_0_int128 *a, const rustsecp256k1_v0_10_0_int128 *b) { return *a == *b; } -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_i128_check_pow2(const rustsecp256k1_v0_9_2_int128 *r, unsigned int n, int sign) { +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_i128_check_pow2(const rustsecp256k1_v0_10_0_int128 *r, unsigned int n, int sign) { VERIFY_CHECK(n < 127); VERIFY_CHECK(sign == 1 || sign == -1); return (*r == (int128_t)((uint128_t)sign << n)); diff --git a/secp256k1-sys/depend/secp256k1/src/int128_struct.h b/secp256k1-sys/depend/secp256k1/src/int128_struct.h index 66083f823..07d148360 100644 --- a/secp256k1-sys/depend/secp256k1/src/int128_struct.h +++ b/secp256k1-sys/depend/secp256k1/src/int128_struct.h @@ -7,8 +7,8 @@ typedef struct { uint64_t lo; uint64_t hi; -} rustsecp256k1_v0_9_2_uint128; +} rustsecp256k1_v0_10_0_uint128; -typedef rustsecp256k1_v0_9_2_uint128 rustsecp256k1_v0_9_2_int128; +typedef rustsecp256k1_v0_10_0_uint128 rustsecp256k1_v0_10_0_int128; #endif diff --git a/secp256k1-sys/depend/secp256k1/src/int128_struct_impl.h b/secp256k1-sys/depend/secp256k1/src/int128_struct_impl.h index 9125856f3..22e8b982d 100644 --- a/secp256k1-sys/depend/secp256k1/src/int128_struct_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/int128_struct_impl.h @@ -13,23 +13,23 @@ # if defined(SECP256K1_MSVC_MULH_TEST_OVERRIDE) # pragma message(__FILE__ ": SECP256K1_MSVC_MULH_TEST_OVERRIDE is defined, forcing use of __(u)mulh.") # endif -static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_2_umul128(uint64_t a, uint64_t b, uint64_t* hi) { +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_10_0_umul128(uint64_t 
a, uint64_t b, uint64_t* hi) { *hi = __umulh(a, b); return a * b; } -static SECP256K1_INLINE int64_t rustsecp256k1_v0_9_2_mul128(int64_t a, int64_t b, int64_t* hi) { +static SECP256K1_INLINE int64_t rustsecp256k1_v0_10_0_mul128(int64_t a, int64_t b, int64_t* hi) { *hi = __mulh(a, b); return (uint64_t)a * (uint64_t)b; } # else /* On x86_64 MSVC, use native _(u)mul128 for 64x64->128 multiplications. */ -# define rustsecp256k1_v0_9_2_umul128 _umul128 -# define rustsecp256k1_v0_9_2_mul128 _mul128 +# define rustsecp256k1_v0_10_0_umul128 _umul128 +# define rustsecp256k1_v0_10_0_mul128 _mul128 # endif #else /* On other systems, emulate 64x64->128 multiplications using 32x32->64 multiplications. */ -static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_2_umul128(uint64_t a, uint64_t b, uint64_t* hi) { +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_10_0_umul128(uint64_t a, uint64_t b, uint64_t* hi) { uint64_t ll = (uint64_t)(uint32_t)a * (uint32_t)b; uint64_t lh = (uint32_t)a * (b >> 32); uint64_t hl = (a >> 32) * (uint32_t)b; @@ -39,7 +39,7 @@ static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_2_umul128(uint64_t a, uint64 return (mid34 << 32) + (uint32_t)ll; } -static SECP256K1_INLINE int64_t rustsecp256k1_v0_9_2_mul128(int64_t a, int64_t b, int64_t* hi) { +static SECP256K1_INLINE int64_t rustsecp256k1_v0_10_0_mul128(int64_t a, int64_t b, int64_t* hi) { uint64_t ll = (uint64_t)(uint32_t)a * (uint32_t)b; int64_t lh = (uint32_t)a * (b >> 32); int64_t hl = (a >> 32) * (uint32_t)b; @@ -50,23 +50,23 @@ static SECP256K1_INLINE int64_t rustsecp256k1_v0_9_2_mul128(int64_t a, int64_t b } #endif -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_load(rustsecp256k1_v0_9_2_uint128 *r, uint64_t hi, uint64_t lo) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_load(rustsecp256k1_v0_10_0_uint128 *r, uint64_t hi, uint64_t lo) { r->hi = hi; r->lo = lo; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_mul(rustsecp256k1_v0_9_2_uint128 *r, uint64_t a, uint64_t b) { - r->lo = rustsecp256k1_v0_9_2_umul128(a, b, &r->hi); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_mul(rustsecp256k1_v0_10_0_uint128 *r, uint64_t a, uint64_t b) { + r->lo = rustsecp256k1_v0_10_0_umul128(a, b, &r->hi); } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_accum_mul(rustsecp256k1_v0_9_2_uint128 *r, uint64_t a, uint64_t b) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_accum_mul(rustsecp256k1_v0_10_0_uint128 *r, uint64_t a, uint64_t b) { uint64_t lo, hi; - lo = rustsecp256k1_v0_9_2_umul128(a, b, &hi); + lo = rustsecp256k1_v0_10_0_umul128(a, b, &hi); r->lo += lo; r->hi += hi + (r->lo < lo); } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_accum_u64(rustsecp256k1_v0_9_2_uint128 *r, uint64_t a) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_accum_u64(rustsecp256k1_v0_10_0_uint128 *r, uint64_t a) { r->lo += a; r->hi += r->lo < a; } @@ -74,7 +74,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_accum_u64(rustsecp256k1_v /* Unsigned (logical) right shift. * Non-constant time in n.
*/ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_rshift(rustsecp256k1_v0_9_2_uint128 *r, unsigned int n) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_rshift(rustsecp256k1_v0_10_0_uint128 *r, unsigned int n) { VERIFY_CHECK(n < 128); if (n >= 64) { r->lo = r->hi >> (n-64); @@ -90,39 +90,39 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_rshift(rustsecp256k1_v0_9 } } -static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_2_u128_to_u64(const rustsecp256k1_v0_9_2_uint128 *a) { +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_10_0_u128_to_u64(const rustsecp256k1_v0_10_0_uint128 *a) { return a->lo; } -static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_2_u128_hi_u64(const rustsecp256k1_v0_9_2_uint128 *a) { +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_10_0_u128_hi_u64(const rustsecp256k1_v0_10_0_uint128 *a) { return a->hi; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_u128_from_u64(rustsecp256k1_v0_9_2_uint128 *r, uint64_t a) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_u128_from_u64(rustsecp256k1_v0_10_0_uint128 *r, uint64_t a) { r->hi = 0; r->lo = a; } -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_u128_check_bits(const rustsecp256k1_v0_9_2_uint128 *r, unsigned int n) { +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_u128_check_bits(const rustsecp256k1_v0_10_0_uint128 *r, unsigned int n) { VERIFY_CHECK(n < 128); return n >= 64 ? r->hi >> (n - 64) == 0 : r->hi == 0 && r->lo >> n == 0; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_load(rustsecp256k1_v0_9_2_int128 *r, int64_t hi, uint64_t lo) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_load(rustsecp256k1_v0_10_0_int128 *r, int64_t hi, uint64_t lo) { r->hi = hi; r->lo = lo; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_mul(rustsecp256k1_v0_9_2_int128 *r, int64_t a, int64_t b) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_mul(rustsecp256k1_v0_10_0_int128 *r, int64_t a, int64_t b) { int64_t hi; - r->lo = (uint64_t)rustsecp256k1_v0_9_2_mul128(a, b, &hi); + r->lo = (uint64_t)rustsecp256k1_v0_10_0_mul128(a, b, &hi); r->hi = (uint64_t)hi; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_accum_mul(rustsecp256k1_v0_9_2_int128 *r, int64_t a, int64_t b) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_accum_mul(rustsecp256k1_v0_10_0_int128 *r, int64_t a, int64_t b) { int64_t hi; - uint64_t lo = (uint64_t)rustsecp256k1_v0_9_2_mul128(a, b, &hi); + uint64_t lo = (uint64_t)rustsecp256k1_v0_10_0_mul128(a, b, &hi); r->lo += lo; hi += r->lo < lo; /* Verify no overflow. @@ -139,9 +139,9 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_accum_mul(rustsecp256k1_v r->hi += hi; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_dissip_mul(rustsecp256k1_v0_9_2_int128 *r, int64_t a, int64_t b) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_dissip_mul(rustsecp256k1_v0_10_0_int128 *r, int64_t a, int64_t b) { int64_t hi; - uint64_t lo = (uint64_t)rustsecp256k1_v0_9_2_mul128(a, b, &hi); + uint64_t lo = (uint64_t)rustsecp256k1_v0_10_0_mul128(a, b, &hi); hi += r->lo < lo; /* Verify no overflow. 
* If r represents a positive value (the sign bit is not set) and the value we are subtracting is a negative value (the sign bit is set), @@ -157,15 +157,15 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_dissip_mul(rustsecp256k1_ r->lo -= lo; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_det(rustsecp256k1_v0_9_2_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d) { - rustsecp256k1_v0_9_2_i128_mul(r, a, d); - rustsecp256k1_v0_9_2_i128_dissip_mul(r, b, c); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_det(rustsecp256k1_v0_10_0_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d) { + rustsecp256k1_v0_10_0_i128_mul(r, a, d); + rustsecp256k1_v0_10_0_i128_dissip_mul(r, b, c); } /* Signed (arithmetic) right shift. * Non-constant time in n. */ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_rshift(rustsecp256k1_v0_9_2_int128 *r, unsigned int n) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_rshift(rustsecp256k1_v0_10_0_int128 *r, unsigned int n) { VERIFY_CHECK(n < 128); if (n >= 64) { r->lo = (uint64_t)((int64_t)(r->hi) >> (n-64)); @@ -176,26 +176,26 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_rshift(rustsecp256k1_v0_9 } } -static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_2_i128_to_u64(const rustsecp256k1_v0_9_2_int128 *a) { +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_10_0_i128_to_u64(const rustsecp256k1_v0_10_0_int128 *a) { return a->lo; } -static SECP256K1_INLINE int64_t rustsecp256k1_v0_9_2_i128_to_i64(const rustsecp256k1_v0_9_2_int128 *a) { +static SECP256K1_INLINE int64_t rustsecp256k1_v0_10_0_i128_to_i64(const rustsecp256k1_v0_10_0_int128 *a) { /* Verify that a represents a 64 bit signed value by checking that the high bits are a sign extension of the low bits. */ VERIFY_CHECK(a->hi == -(a->lo >> 63)); - return (int64_t)rustsecp256k1_v0_9_2_i128_to_u64(a); + return (int64_t)rustsecp256k1_v0_10_0_i128_to_u64(a); } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_i128_from_i64(rustsecp256k1_v0_9_2_int128 *r, int64_t a) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_i128_from_i64(rustsecp256k1_v0_10_0_int128 *r, int64_t a) { r->hi = (uint64_t)(a >> 63); r->lo = (uint64_t)a; } -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_i128_eq_var(const rustsecp256k1_v0_9_2_int128 *a, const rustsecp256k1_v0_9_2_int128 *b) { +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_i128_eq_var(const rustsecp256k1_v0_10_0_int128 *a, const rustsecp256k1_v0_10_0_int128 *b) { return a->hi == b->hi && a->lo == b->lo; } -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_i128_check_pow2(const rustsecp256k1_v0_9_2_int128 *r, unsigned int n, int sign) { +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_i128_check_pow2(const rustsecp256k1_v0_10_0_int128 *r, unsigned int n, int sign) { VERIFY_CHECK(n < 127); VERIFY_CHECK(sign == 1 || sign == -1); return n >= 64 ? r->hi == (uint64_t)sign << (n - 64) && r->lo == 0 diff --git a/secp256k1-sys/depend/secp256k1/src/modinv32.h b/secp256k1-sys/depend/secp256k1/src/modinv32.h index a5570cd78..769683585 100644 --- a/secp256k1-sys/depend/secp256k1/src/modinv32.h +++ b/secp256k1-sys/depend/secp256k1/src/modinv32.h @@ -14,15 +14,15 @@ * Its value is sum(v[i] * 2^(30*i), i=0..8). */ typedef struct { int32_t v[9]; -} rustsecp256k1_v0_9_2_modinv32_signed30; +} rustsecp256k1_v0_10_0_modinv32_signed30; typedef struct { /* The modulus in signed30 notation, must be odd and in [3, 2^256]. 
*/ - rustsecp256k1_v0_9_2_modinv32_signed30 modulus; + rustsecp256k1_v0_10_0_modinv32_signed30 modulus; /* modulus^{-1} mod 2^30 */ uint32_t modulus_inv30; -} rustsecp256k1_v0_9_2_modinv32_modinfo; +} rustsecp256k1_v0_10_0_modinv32_modinfo; /* Replace x with its modular inverse mod modinfo->modulus. x must be in range [0, modulus). * If x is zero, the result will be zero as well. If not, the inverse must exist (i.e., the gcd of @@ -30,14 +30,14 @@ typedef struct { * * On output, all of x's limbs will be in [0, 2^30). */ -static void rustsecp256k1_v0_9_2_modinv32_var(rustsecp256k1_v0_9_2_modinv32_signed30 *x, const rustsecp256k1_v0_9_2_modinv32_modinfo *modinfo); +static void rustsecp256k1_v0_10_0_modinv32_var(rustsecp256k1_v0_10_0_modinv32_signed30 *x, const rustsecp256k1_v0_10_0_modinv32_modinfo *modinfo); -/* Same as rustsecp256k1_v0_9_2_modinv32_var, but constant time in x (not in the modulus). */ -static void rustsecp256k1_v0_9_2_modinv32(rustsecp256k1_v0_9_2_modinv32_signed30 *x, const rustsecp256k1_v0_9_2_modinv32_modinfo *modinfo); +/* Same as rustsecp256k1_v0_10_0_modinv32_var, but constant time in x (not in the modulus). */ +static void rustsecp256k1_v0_10_0_modinv32(rustsecp256k1_v0_10_0_modinv32_signed30 *x, const rustsecp256k1_v0_10_0_modinv32_modinfo *modinfo); /* Compute the Jacobi symbol for (x | modinfo->modulus). x must be coprime with modulus (and thus * cannot be 0, as modulus >= 3). All limbs of x must be non-negative. Returns 0 if the result * cannot be computed. */ -static int rustsecp256k1_v0_9_2_jacobi32_maybe_var(const rustsecp256k1_v0_9_2_modinv32_signed30 *x, const rustsecp256k1_v0_9_2_modinv32_modinfo *modinfo); +static int rustsecp256k1_v0_10_0_jacobi32_maybe_var(const rustsecp256k1_v0_10_0_modinv32_signed30 *x, const rustsecp256k1_v0_10_0_modinv32_modinfo *modinfo); #endif /* SECP256K1_MODINV32_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/modinv32_impl.h b/secp256k1-sys/depend/secp256k1/src/modinv32_impl.h index 1c3bbf962..6adb3087b 100644 --- a/secp256k1-sys/depend/secp256k1/src/modinv32_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modinv32_impl.h @@ -21,10 +21,10 @@ */ #ifdef VERIFY -static const rustsecp256k1_v0_9_2_modinv32_signed30 SECP256K1_SIGNED30_ONE = {{1}}; +static const rustsecp256k1_v0_10_0_modinv32_signed30 SECP256K1_SIGNED30_ONE = {{1}}; /* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^30). */ -static void rustsecp256k1_v0_9_2_modinv32_mul_30(rustsecp256k1_v0_9_2_modinv32_signed30 *r, const rustsecp256k1_v0_9_2_modinv32_signed30 *a, int alen, int32_t factor) { +static void rustsecp256k1_v0_10_0_modinv32_mul_30(rustsecp256k1_v0_10_0_modinv32_signed30 *r, const rustsecp256k1_v0_10_0_modinv32_signed30 *a, int alen, int32_t factor) { const int32_t M30 = (int32_t)(UINT32_MAX >> 2); int64_t c = 0; int i; @@ -38,11 +38,11 @@ static void rustsecp256k1_v0_9_2_modinv32_mul_30(rustsecp256k1_v0_9_2_modinv32_s } /* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. A consists of alen limbs; b has 9. */ -static int rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(const rustsecp256k1_v0_9_2_modinv32_signed30 *a, int alen, const rustsecp256k1_v0_9_2_modinv32_signed30 *b, int32_t factor) { +static int rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(const rustsecp256k1_v0_10_0_modinv32_signed30 *a, int alen, const rustsecp256k1_v0_10_0_modinv32_signed30 *b, int32_t factor) { int i; - rustsecp256k1_v0_9_2_modinv32_signed30 am, bm; - rustsecp256k1_v0_9_2_modinv32_mul_30(&am, a, alen, 1); /* Normalize all but the top limb of a.
*/ - rustsecp256k1_v0_9_2_modinv32_mul_30(&bm, b, 9, factor); + rustsecp256k1_v0_10_0_modinv32_signed30 am, bm; + rustsecp256k1_v0_10_0_modinv32_mul_30(&am, a, alen, 1); /* Normalize all but the top limb of a. */ + rustsecp256k1_v0_10_0_modinv32_mul_30(&bm, b, 9, factor); for (i = 0; i < 8; ++i) { /* Verify that all but the top limb of a and b are normalized. */ VERIFY_CHECK(am.v[i] >> 30 == 0); @@ -60,7 +60,7 @@ static int rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(const rustsecp256k1_v0_9_2_m * to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the * process. The input must have limbs in range (-2^30,2^30). The output will have limbs in range * [0,2^30). */ -static void rustsecp256k1_v0_9_2_modinv32_normalize_30(rustsecp256k1_v0_9_2_modinv32_signed30 *r, int32_t sign, const rustsecp256k1_v0_9_2_modinv32_modinfo *modinfo) { +static void rustsecp256k1_v0_10_0_modinv32_normalize_30(rustsecp256k1_v0_10_0_modinv32_signed30 *r, int32_t sign, const rustsecp256k1_v0_10_0_modinv32_modinfo *modinfo) { const int32_t M30 = (int32_t)(UINT32_MAX >> 2); int32_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4], r5 = r->v[5], r6 = r->v[6], r7 = r->v[7], r8 = r->v[8]; @@ -73,8 +73,8 @@ static void rustsecp256k1_v0_9_2_modinv32_normalize_30(rustsecp256k1_v0_9_2_modi VERIFY_CHECK(r->v[i] >= -M30); VERIFY_CHECK(r->v[i] <= M30); } - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ #endif /* In a first step, add the modulus if the input is negative, and then negate if requested. @@ -144,7 +144,6 @@ static void rustsecp256k1_v0_9_2_modinv32_normalize_30(rustsecp256k1_v0_9_2_modi r->v[7] = r7; r->v[8] = r8; -#ifdef VERIFY VERIFY_CHECK(r0 >> 30 == 0); VERIFY_CHECK(r1 >> 30 == 0); VERIFY_CHECK(r2 >> 30 == 0); @@ -154,9 +153,8 @@ static void rustsecp256k1_v0_9_2_modinv32_normalize_30(rustsecp256k1_v0_9_2_modi VERIFY_CHECK(r6 >> 30 == 0); VERIFY_CHECK(r7 >> 30 == 0); VERIFY_CHECK(r8 >> 30 == 0); - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ -#endif + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ } /* Data type for transition matrices (see section 3 of explanation). @@ -166,7 +164,7 @@ static void rustsecp256k1_v0_9_2_modinv32_normalize_30(rustsecp256k1_v0_9_2_modi */ typedef struct { int32_t u, v, q, r; -} rustsecp256k1_v0_9_2_modinv32_trans2x2; +} rustsecp256k1_v0_10_0_modinv32_trans2x2; /* Compute the transition matrix and zeta for 30 divsteps. * @@ -178,7 +176,7 @@ typedef struct { * * Implements the divsteps_n_matrix function from the explanation. 
*/ -static int32_t rustsecp256k1_v0_9_2_modinv32_divsteps_30(int32_t zeta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_9_2_modinv32_trans2x2 *t) { +static int32_t rustsecp256k1_v0_10_0_modinv32_divsteps_30(int32_t zeta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_10_0_modinv32_trans2x2 *t) { /* u,v,q,r are the elements of the transformation matrix being built up, * starting with the identity matrix. Semantically they are signed integers * in range [-2^30,2^30], but here represented as unsigned mod 2^32. This @@ -235,8 +233,8 @@ static int32_t rustsecp256k1_v0_9_2_modinv32_divsteps_30(int32_t zeta, uint32_t return zeta; } -/* rustsecp256k1_v0_9_2_modinv32_inv256[i] = -(2*i+1)^-1 (mod 256) */ -static const uint8_t rustsecp256k1_v0_9_2_modinv32_inv256[128] = { +/* rustsecp256k1_v0_10_0_modinv32_inv256[i] = -(2*i+1)^-1 (mod 256) */ +static const uint8_t rustsecp256k1_v0_10_0_modinv32_inv256[128] = { 0xFF, 0x55, 0x33, 0x49, 0xC7, 0x5D, 0x3B, 0x11, 0x0F, 0xE5, 0xC3, 0x59, 0xD7, 0xED, 0xCB, 0x21, 0x1F, 0x75, 0x53, 0x69, 0xE7, 0x7D, 0x5B, 0x31, 0x2F, 0x05, 0xE3, 0x79, 0xF7, 0x0D, 0xEB, 0x41, 0x3F, 0x95, 0x73, 0x89, @@ -260,8 +258,8 @@ static const uint8_t rustsecp256k1_v0_9_2_modinv32_inv256[128] = { * * Implements the divsteps_n_matrix_var function from the explanation. */ -static int32_t rustsecp256k1_v0_9_2_modinv32_divsteps_30_var(int32_t eta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_9_2_modinv32_trans2x2 *t) { - /* Transformation matrix; see comments in rustsecp256k1_v0_9_2_modinv32_divsteps_30. */ +static int32_t rustsecp256k1_v0_10_0_modinv32_divsteps_30_var(int32_t eta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_10_0_modinv32_trans2x2 *t) { + /* Transformation matrix; see comments in rustsecp256k1_v0_10_0_modinv32_divsteps_30. */ uint32_t u = 1, v = 0, q = 0, r = 1; uint32_t f = f0, g = g0, m; uint16_t w; @@ -269,7 +267,7 @@ static int32_t rustsecp256k1_v0_9_2_modinv32_divsteps_30_var(int32_t eta, uint32 for (;;) { /* Use a sentinel bit to count zeros only up to i. */ - zeros = rustsecp256k1_v0_9_2_ctz32_var(g | (UINT32_MAX << i)); + zeros = rustsecp256k1_v0_10_0_ctz32_var(g | (UINT32_MAX << i)); /* Perform zeros divsteps at once; they all just divide g by two. */ g >>= zeros; u <<= zeros; @@ -300,7 +298,7 @@ static int32_t rustsecp256k1_v0_9_2_modinv32_divsteps_30_var(int32_t eta, uint32 VERIFY_CHECK(limit > 0 && limit <= 30); m = (UINT32_MAX >> (32 - limit)) & 255U; /* Find what multiple of f must be added to g to cancel its bottom min(limit, 8) bits. */ - w = (g * rustsecp256k1_v0_9_2_modinv32_inv256[(f >> 1) & 127]) & m; + w = (g * rustsecp256k1_v0_10_0_modinv32_inv256[(f >> 1) & 127]) & m; /* Do so. */ g += f * w; q += u * w; @@ -333,7 +331,7 @@ static int32_t rustsecp256k1_v0_9_2_modinv32_divsteps_30_var(int32_t eta, uint32 * change, but are meaningless. * Return: final eta */ -static int32_t rustsecp256k1_v0_9_2_modinv32_posdivsteps_30_var(int32_t eta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_9_2_modinv32_trans2x2 *t, int *jacp) { +static int32_t rustsecp256k1_v0_10_0_modinv32_posdivsteps_30_var(int32_t eta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_10_0_modinv32_trans2x2 *t, int *jacp) { /* Transformation matrix. */ uint32_t u = 1, v = 0, q = 0, r = 1; uint32_t f = f0, g = g0, m; @@ -343,7 +341,7 @@ static int32_t rustsecp256k1_v0_9_2_modinv32_posdivsteps_30_var(int32_t eta, uin for (;;) { /* Use a sentinel bit to count zeros only up to i. 
*/ - zeros = rustsecp256k1_v0_9_2_ctz32_var(g | (UINT32_MAX << i)); + zeros = rustsecp256k1_v0_10_0_ctz32_var(g | (UINT32_MAX << i)); /* Perform zeros divsteps at once; they all just divide g by two. */ g >>= zeros; u <<= zeros; @@ -378,7 +376,7 @@ static int32_t rustsecp256k1_v0_9_2_modinv32_posdivsteps_30_var(int32_t eta, uin VERIFY_CHECK(limit > 0 && limit <= 30); m = (UINT32_MAX >> (32 - limit)) & 255U; /* Find what multiple of f must be added to g to cancel its bottom min(limit, 8) bits. */ - w = (g * rustsecp256k1_v0_9_2_modinv32_inv256[(f >> 1) & 127]) & m; + w = (g * rustsecp256k1_v0_10_0_modinv32_inv256[(f >> 1) & 127]) & m; /* Do so. */ g += f * w; q += u * w; @@ -407,20 +405,19 @@ static int32_t rustsecp256k1_v0_9_2_modinv32_posdivsteps_30_var(int32_t eta, uin * * This implements the update_de function from the explanation. */ -static void rustsecp256k1_v0_9_2_modinv32_update_de_30(rustsecp256k1_v0_9_2_modinv32_signed30 *d, rustsecp256k1_v0_9_2_modinv32_signed30 *e, const rustsecp256k1_v0_9_2_modinv32_trans2x2 *t, const rustsecp256k1_v0_9_2_modinv32_modinfo* modinfo) { +static void rustsecp256k1_v0_10_0_modinv32_update_de_30(rustsecp256k1_v0_10_0_modinv32_signed30 *d, rustsecp256k1_v0_10_0_modinv32_signed30 *e, const rustsecp256k1_v0_10_0_modinv32_trans2x2 *t, const rustsecp256k1_v0_10_0_modinv32_modinfo* modinfo) { const int32_t M30 = (int32_t)(UINT32_MAX >> 2); const int32_t u = t->u, v = t->v, q = t->q, r = t->r; int32_t di, ei, md, me, sd, se; int64_t cd, ce; int i; -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */ VERIFY_CHECK(labs(u) <= (M30 + 1 - labs(v))); /* |u|+|v| <= 2^30 */ VERIFY_CHECK(labs(q) <= (M30 + 1 - labs(r))); /* |q|+|r| <= 2^30 */ -#endif + /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */ sd = d->v[8] >> 31; se = e->v[8] >> 31; @@ -455,19 +452,18 @@ static void rustsecp256k1_v0_9_2_modinv32_update_de_30(rustsecp256k1_v0_9_2_modi /* What remains is limb 9 of t*[d,e]+modulus*[md,me]; store it as output limb 8. 
*/ d->v[8] = (int32_t)cd; e->v[8] = (int32_t)ce; -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */ -#endif + + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */ } /* Compute (t/2^30) * [f, g], where t is a transition matrix for 30 divsteps. * * This implements the update_fg function from the explanation. */ -static void rustsecp256k1_v0_9_2_modinv32_update_fg_30(rustsecp256k1_v0_9_2_modinv32_signed30 *f, rustsecp256k1_v0_9_2_modinv32_signed30 *g, const rustsecp256k1_v0_9_2_modinv32_trans2x2 *t) { +static void rustsecp256k1_v0_10_0_modinv32_update_fg_30(rustsecp256k1_v0_10_0_modinv32_signed30 *f, rustsecp256k1_v0_10_0_modinv32_signed30 *g, const rustsecp256k1_v0_10_0_modinv32_trans2x2 *t) { const int32_t M30 = (int32_t)(UINT32_MAX >> 2); const int32_t u = t->u, v = t->v, q = t->q, r = t->r; int32_t fi, gi; @@ -502,7 +498,7 @@ static void rustsecp256k1_v0_9_2_modinv32_update_fg_30(rustsecp256k1_v0_9_2_modi * * This implements the update_fg function from the explanation in modinv64_impl.h. */ -static void rustsecp256k1_v0_9_2_modinv32_update_fg_30_var(int len, rustsecp256k1_v0_9_2_modinv32_signed30 *f, rustsecp256k1_v0_9_2_modinv32_signed30 *g, const rustsecp256k1_v0_9_2_modinv32_trans2x2 *t) { +static void rustsecp256k1_v0_10_0_modinv32_update_fg_30_var(int len, rustsecp256k1_v0_10_0_modinv32_signed30 *f, rustsecp256k1_v0_10_0_modinv32_signed30 *g, const rustsecp256k1_v0_10_0_modinv32_trans2x2 *t) { const int32_t M30 = (int32_t)(UINT32_MAX >> 2); const int32_t u = t->u, v = t->v, q = t->q, r = t->r; int32_t fi, gi; @@ -533,65 +529,62 @@ static void rustsecp256k1_v0_9_2_modinv32_update_fg_30_var(int len, rustsecp256k } /* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */ -static void rustsecp256k1_v0_9_2_modinv32(rustsecp256k1_v0_9_2_modinv32_signed30 *x, const rustsecp256k1_v0_9_2_modinv32_modinfo *modinfo) { +static void rustsecp256k1_v0_10_0_modinv32(rustsecp256k1_v0_10_0_modinv32_signed30 *x, const rustsecp256k1_v0_10_0_modinv32_modinfo *modinfo) { /* Start with d=0, e=1, f=modulus, g=x, zeta=-1. */ - rustsecp256k1_v0_9_2_modinv32_signed30 d = {{0}}; - rustsecp256k1_v0_9_2_modinv32_signed30 e = {{1}}; - rustsecp256k1_v0_9_2_modinv32_signed30 f = modinfo->modulus; - rustsecp256k1_v0_9_2_modinv32_signed30 g = *x; + rustsecp256k1_v0_10_0_modinv32_signed30 d = {{0}}; + rustsecp256k1_v0_10_0_modinv32_signed30 e = {{1}}; + rustsecp256k1_v0_10_0_modinv32_signed30 f = modinfo->modulus; + rustsecp256k1_v0_10_0_modinv32_signed30 g = *x; int i; int32_t zeta = -1; /* zeta = -(delta+1/2); delta is initially 1/2. */ /* Do 20 iterations of 30 divsteps each = 600 divsteps. 590 suffices for 256-bit inputs. 
*/ for (i = 0; i < 20; ++i) { /* Compute transition matrix and new zeta after 30 divsteps. */ - rustsecp256k1_v0_9_2_modinv32_trans2x2 t; - zeta = rustsecp256k1_v0_9_2_modinv32_divsteps_30(zeta, f.v[0], g.v[0], &t); + rustsecp256k1_v0_10_0_modinv32_trans2x2 t; + zeta = rustsecp256k1_v0_10_0_modinv32_divsteps_30(zeta, f.v[0], g.v[0], &t); /* Update d,e using that transition matrix. */ - rustsecp256k1_v0_9_2_modinv32_update_de_30(&d, &e, &t, modinfo); + rustsecp256k1_v0_10_0_modinv32_update_de_30(&d, &e, &t, modinfo); /* Update f,g using that transition matrix. */ -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */ -#endif - rustsecp256k1_v0_9_2_modinv32_update_fg_30(&f, &g, &t); -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */ -#endif + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */ + + rustsecp256k1_v0_10_0_modinv32_update_fg_30(&f, &g, &t); + + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */ } /* At this point sufficient iterations have been performed that g must have reached 0 * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g * values i.e. +/- 1, and d now contains +/- the modular inverse. 
*/ -#ifdef VERIFY + /* g == 0 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0); + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0); /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, -1) == 0 || - rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, 1) == 0 || - (rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && - rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && - (rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 || - rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0))); -#endif + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, -1) == 0 || + rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, 1) == 0 || + (rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + (rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 || + rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0))); /* Optionally negate d, normalize to [0,modulus), and return it. */ - rustsecp256k1_v0_9_2_modinv32_normalize_30(&d, f.v[8], modinfo); + rustsecp256k1_v0_10_0_modinv32_normalize_30(&d, f.v[8], modinfo); *x = d; } /* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */ -static void rustsecp256k1_v0_9_2_modinv32_var(rustsecp256k1_v0_9_2_modinv32_signed30 *x, const rustsecp256k1_v0_9_2_modinv32_modinfo *modinfo) { +static void rustsecp256k1_v0_10_0_modinv32_var(rustsecp256k1_v0_10_0_modinv32_signed30 *x, const rustsecp256k1_v0_10_0_modinv32_modinfo *modinfo) { /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */ - rustsecp256k1_v0_9_2_modinv32_signed30 d = {{0, 0, 0, 0, 0, 0, 0, 0, 0}}; - rustsecp256k1_v0_9_2_modinv32_signed30 e = {{1, 0, 0, 0, 0, 0, 0, 0, 0}}; - rustsecp256k1_v0_9_2_modinv32_signed30 f = modinfo->modulus; - rustsecp256k1_v0_9_2_modinv32_signed30 g = *x; + rustsecp256k1_v0_10_0_modinv32_signed30 d = {{0, 0, 0, 0, 0, 0, 0, 0, 0}}; + rustsecp256k1_v0_10_0_modinv32_signed30 e = {{1, 0, 0, 0, 0, 0, 0, 0, 0}}; + rustsecp256k1_v0_10_0_modinv32_signed30 f = modinfo->modulus; + rustsecp256k1_v0_10_0_modinv32_signed30 g = *x; #ifdef VERIFY int i = 0; #endif @@ -602,18 +595,18 @@ static void rustsecp256k1_v0_9_2_modinv32_var(rustsecp256k1_v0_9_2_modinv32_sign /* Do iterations of 30 divsteps each until g=0. */ while (1) { /* Compute transition matrix and new eta after 30 divsteps. */ - rustsecp256k1_v0_9_2_modinv32_trans2x2 t; - eta = rustsecp256k1_v0_9_2_modinv32_divsteps_30_var(eta, f.v[0], g.v[0], &t); + rustsecp256k1_v0_10_0_modinv32_trans2x2 t; + eta = rustsecp256k1_v0_10_0_modinv32_divsteps_30_var(eta, f.v[0], g.v[0], &t); /* Update d,e using that transition matrix. */ - rustsecp256k1_v0_9_2_modinv32_update_de_30(&d, &e, &t, modinfo); + rustsecp256k1_v0_10_0_modinv32_update_de_30(&d, &e, &t, modinfo); /* Update f,g using that transition matrix. 
*/ -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ -#endif - rustsecp256k1_v0_9_2_modinv32_update_fg_30_var(len, &f, &g, &t); + + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ + + rustsecp256k1_v0_10_0_modinv32_update_fg_30_var(len, &f, &g, &t); /* If the bottom limb of g is 0, there is a chance g=0. */ if (g.v[0] == 0) { cond = 0; @@ -637,31 +630,29 @@ static void rustsecp256k1_v0_9_2_modinv32_var(rustsecp256k1_v0_9_2_modinv32_sign g.v[len - 2] |= (uint32_t)gn << 30; --len; } -#ifdef VERIFY + VERIFY_CHECK(++i < 25); /* We should never need more than 25*30 = 750 divsteps */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ -#endif + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ } /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. 
*/ -#ifdef VERIFY + /* g == 0 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0); + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0); /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, -1) == 0 || - rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, 1) == 0 || - (rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && - rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && - (rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 || - rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0))); -#endif + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, -1) == 0 || + rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, 1) == 0 || + (rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + (rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 || + rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0))); /* Optionally negate d, normalize to [0,modulus), and return it. */ - rustsecp256k1_v0_9_2_modinv32_normalize_30(&d, f.v[len - 1], modinfo); + rustsecp256k1_v0_10_0_modinv32_normalize_30(&d, f.v[len - 1], modinfo); *x = d; } @@ -674,10 +665,10 @@ static void rustsecp256k1_v0_9_2_modinv32_var(rustsecp256k1_v0_9_2_modinv32_sign #endif /* Compute the Jacobi symbol of x modulo modinfo->modulus (variable time). gcd(x,modulus) must be 1. */ -static int rustsecp256k1_v0_9_2_jacobi32_maybe_var(const rustsecp256k1_v0_9_2_modinv32_signed30 *x, const rustsecp256k1_v0_9_2_modinv32_modinfo *modinfo) { +static int rustsecp256k1_v0_10_0_jacobi32_maybe_var(const rustsecp256k1_v0_10_0_modinv32_signed30 *x, const rustsecp256k1_v0_10_0_modinv32_modinfo *modinfo) { /* Start with f=modulus, g=x, eta=-1. */ - rustsecp256k1_v0_9_2_modinv32_signed30 f = modinfo->modulus; - rustsecp256k1_v0_9_2_modinv32_signed30 g = *x; + rustsecp256k1_v0_10_0_modinv32_signed30 f = modinfo->modulus; + rustsecp256k1_v0_10_0_modinv32_signed30 g = *x; int j, len = 9; int32_t eta = -1; /* eta = -delta; delta is initially 1 */ int32_t cond, fn, gn; @@ -694,16 +685,15 @@ static int rustsecp256k1_v0_9_2_jacobi32_maybe_var(const rustsecp256k1_v0_9_2_mo for (count = 0; count < JACOBI32_ITERATIONS; ++count) { /* Compute transition matrix and new eta after 30 posdivsteps. */ - rustsecp256k1_v0_9_2_modinv32_trans2x2 t; - eta = rustsecp256k1_v0_9_2_modinv32_posdivsteps_30_var(eta, f.v[0] | ((uint32_t)f.v[1] << 30), g.v[0] | ((uint32_t)g.v[1] << 30), &t, &jac); + rustsecp256k1_v0_10_0_modinv32_trans2x2 t; + eta = rustsecp256k1_v0_10_0_modinv32_posdivsteps_30_var(eta, f.v[0] | ((uint32_t)f.v[1] << 30), g.v[0] | ((uint32_t)g.v[1] << 30), &t, &jac); /* Update f,g using that transition matrix. 
*/ -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ -#endif - rustsecp256k1_v0_9_2_modinv32_update_fg_30_var(len, &f, &g, &t); + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ + + rustsecp256k1_v0_10_0_modinv32_update_fg_30_var(len, &f, &g, &t); /* If the bottom limb of f is 1, there is a chance that f=1. */ if (f.v[0] == 1) { cond = 0; @@ -723,12 +713,11 @@ static int rustsecp256k1_v0_9_2_jacobi32_maybe_var(const rustsecp256k1_v0_9_2_mo cond |= gn; /* If so, reduce length. */ if (cond == 0) --len; -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ -#endif + + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ } /* The loop failed to converge to f=g after 1500 iterations. Return 0, indicating unknown result. */ diff --git a/secp256k1-sys/depend/secp256k1/src/modinv64.h b/secp256k1-sys/depend/secp256k1/src/modinv64.h index 698efcdf3..0c6fa91d1 100644 --- a/secp256k1-sys/depend/secp256k1/src/modinv64.h +++ b/secp256k1-sys/depend/secp256k1/src/modinv64.h @@ -18,15 +18,15 @@ * Its value is sum(v[i] * 2^(62*i), i=0..4). */ typedef struct { int64_t v[5]; -} rustsecp256k1_v0_9_2_modinv64_signed62; +} rustsecp256k1_v0_10_0_modinv64_signed62; typedef struct { /* The modulus in signed62 notation, must be odd and in [3, 2^256]. */ - rustsecp256k1_v0_9_2_modinv64_signed62 modulus; + rustsecp256k1_v0_10_0_modinv64_signed62 modulus; /* modulus^{-1} mod 2^62 */ uint64_t modulus_inv62; -} rustsecp256k1_v0_9_2_modinv64_modinfo; +} rustsecp256k1_v0_10_0_modinv64_modinfo; /* Replace x with its modular inverse mod modinfo->modulus. x must be in range [0, modulus). * If x is zero, the result will be zero as well. If not, the inverse must exist (i.e., the gcd of @@ -34,14 +34,14 @@ typedef struct { * * On output, all of x's limbs will be in [0, 2^62). 
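The signed62 type introduced here (value = sum(v[i] * 2^(62*i), i=0..4)) maps to an ordinary 256-bit little-endian limb array as in the sketch below. The to_signed62 helper is illustrative; the library's actual conversions live with the scalar and field types, not in this file.

    #include <stdint.h>

    /* Pack a 256-bit value, given as four 64-bit little-endian limbs, into
     * five 62-bit limbs: four full 62-bit limbs plus 8 bits in the top limb.
     * During computation the top limb also carries the sign. Sketch only. */
    static void to_signed62(int64_t v[5], const uint64_t a[4]) {
        const uint64_t M62 = UINT64_MAX >> 2;
        v[0] = (int64_t)(a[0] & M62);                          /* bits 0..61 */
        v[1] = (int64_t)(((a[0] >> 62) | (a[1] << 2)) & M62);  /* bits 62..123 */
        v[2] = (int64_t)(((a[1] >> 60) | (a[2] << 4)) & M62);  /* bits 124..185 */
        v[3] = (int64_t)(((a[2] >> 58) | (a[3] << 6)) & M62);  /* bits 186..247 */
        v[4] = (int64_t)(a[3] >> 56);                          /* bits 248..255 */
    }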
*/ -static void rustsecp256k1_v0_9_2_modinv64_var(rustsecp256k1_v0_9_2_modinv64_signed62 *x, const rustsecp256k1_v0_9_2_modinv64_modinfo *modinfo); +static void rustsecp256k1_v0_10_0_modinv64_var(rustsecp256k1_v0_10_0_modinv64_signed62 *x, const rustsecp256k1_v0_10_0_modinv64_modinfo *modinfo); -/* Same as rustsecp256k1_v0_9_2_modinv64_var, but constant time in x (not in the modulus). */ -static void rustsecp256k1_v0_9_2_modinv64(rustsecp256k1_v0_9_2_modinv64_signed62 *x, const rustsecp256k1_v0_9_2_modinv64_modinfo *modinfo); +/* Same as rustsecp256k1_v0_10_0_modinv64_var, but constant time in x (not in the modulus). */ +static void rustsecp256k1_v0_10_0_modinv64(rustsecp256k1_v0_10_0_modinv64_signed62 *x, const rustsecp256k1_v0_10_0_modinv64_modinfo *modinfo); /* Compute the Jacobi symbol for (x | modinfo->modulus). x must be coprime with modulus (and thus * cannot be 0, as modulus >= 3). All limbs of x must be non-negative. Returns 0 if the result * cannot be computed. */ -static int rustsecp256k1_v0_9_2_jacobi64_maybe_var(const rustsecp256k1_v0_9_2_modinv64_signed62 *x, const rustsecp256k1_v0_9_2_modinv64_modinfo *modinfo); +static int rustsecp256k1_v0_10_0_jacobi64_maybe_var(const rustsecp256k1_v0_10_0_modinv64_signed62 *x, const rustsecp256k1_v0_10_0_modinv64_modinfo *modinfo); #endif /* SECP256K1_MODINV64_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/modinv64_impl.h b/secp256k1-sys/depend/secp256k1/src/modinv64_impl.h index cad1da1cd..4ea806504 100644 --- a/secp256k1-sys/depend/secp256k1/src/modinv64_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modinv64_impl.h @@ -24,41 +24,41 @@ */ typedef struct { int64_t u, v, q, r; -} rustsecp256k1_v0_9_2_modinv64_trans2x2; +} rustsecp256k1_v0_10_0_modinv64_trans2x2; #ifdef VERIFY /* Helper function to compute the absolute value of an int64_t. * (we don't use abs/labs/llabs as it depends on the int sizes). */ -static int64_t rustsecp256k1_v0_9_2_modinv64_abs(int64_t v) { +static int64_t rustsecp256k1_v0_10_0_modinv64_abs(int64_t v) { VERIFY_CHECK(v > INT64_MIN); if (v < 0) return -v; return v; } -static const rustsecp256k1_v0_9_2_modinv64_signed62 SECP256K1_SIGNED62_ONE = {{1}}; +static const rustsecp256k1_v0_10_0_modinv64_signed62 SECP256K1_SIGNED62_ONE = {{1}}; /* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^62). 
*/ -static void rustsecp256k1_v0_9_2_modinv64_mul_62(rustsecp256k1_v0_9_2_modinv64_signed62 *r, const rustsecp256k1_v0_9_2_modinv64_signed62 *a, int alen, int64_t factor) { +static void rustsecp256k1_v0_10_0_modinv64_mul_62(rustsecp256k1_v0_10_0_modinv64_signed62 *r, const rustsecp256k1_v0_10_0_modinv64_signed62 *a, int alen, int64_t factor) { const uint64_t M62 = UINT64_MAX >> 2; - rustsecp256k1_v0_9_2_int128 c, d; + rustsecp256k1_v0_10_0_int128 c, d; int i; - rustsecp256k1_v0_9_2_i128_from_i64(&c, 0); + rustsecp256k1_v0_10_0_i128_from_i64(&c, 0); for (i = 0; i < 4; ++i) { - if (i < alen) rustsecp256k1_v0_9_2_i128_accum_mul(&c, a->v[i], factor); - r->v[i] = rustsecp256k1_v0_9_2_i128_to_u64(&c) & M62; rustsecp256k1_v0_9_2_i128_rshift(&c, 62); + if (i < alen) rustsecp256k1_v0_10_0_i128_accum_mul(&c, a->v[i], factor); + r->v[i] = rustsecp256k1_v0_10_0_i128_to_u64(&c) & M62; rustsecp256k1_v0_10_0_i128_rshift(&c, 62); } - if (4 < alen) rustsecp256k1_v0_9_2_i128_accum_mul(&c, a->v[4], factor); - rustsecp256k1_v0_9_2_i128_from_i64(&d, rustsecp256k1_v0_9_2_i128_to_i64(&c)); - VERIFY_CHECK(rustsecp256k1_v0_9_2_i128_eq_var(&c, &d)); - r->v[4] = rustsecp256k1_v0_9_2_i128_to_i64(&c); + if (4 < alen) rustsecp256k1_v0_10_0_i128_accum_mul(&c, a->v[4], factor); + rustsecp256k1_v0_10_0_i128_from_i64(&d, rustsecp256k1_v0_10_0_i128_to_i64(&c)); + VERIFY_CHECK(rustsecp256k1_v0_10_0_i128_eq_var(&c, &d)); + r->v[4] = rustsecp256k1_v0_10_0_i128_to_i64(&c); } /* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. A has alen limbs; b has 5. */ -static int rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(const rustsecp256k1_v0_9_2_modinv64_signed62 *a, int alen, const rustsecp256k1_v0_9_2_modinv64_signed62 *b, int64_t factor) { +static int rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(const rustsecp256k1_v0_10_0_modinv64_signed62 *a, int alen, const rustsecp256k1_v0_10_0_modinv64_signed62 *b, int64_t factor) { int i; - rustsecp256k1_v0_9_2_modinv64_signed62 am, bm; - rustsecp256k1_v0_9_2_modinv64_mul_62(&am, a, alen, 1); /* Normalize all but the top limb of a. */ - rustsecp256k1_v0_9_2_modinv64_mul_62(&bm, b, 5, factor); + rustsecp256k1_v0_10_0_modinv64_signed62 am, bm; + rustsecp256k1_v0_10_0_modinv64_mul_62(&am, a, alen, 1); /* Normalize all but the top limb of a. */ + rustsecp256k1_v0_10_0_modinv64_mul_62(&bm, b, 5, factor); for (i = 0; i < 4; ++i) { /* Verify that all but the top limb of a and b are normalized. */ VERIFY_CHECK(am.v[i] >> 62 == 0); @@ -72,11 +72,11 @@ static int rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(const rustsecp256k1_v0_9_2_m } /* Check if the determinant of t is equal to 1 << n. If abs, check if |det t| == 1 << n. */ -static int rustsecp256k1_v0_9_2_modinv64_det_check_pow2(const rustsecp256k1_v0_9_2_modinv64_trans2x2 *t, unsigned int n, int abs) { - rustsecp256k1_v0_9_2_int128 a; - rustsecp256k1_v0_9_2_i128_det(&a, t->u, t->v, t->q, t->r); - if (rustsecp256k1_v0_9_2_i128_check_pow2(&a, n, 1)) return 1; - if (abs && rustsecp256k1_v0_9_2_i128_check_pow2(&a, n, -1)) return 1; +static int rustsecp256k1_v0_10_0_modinv64_det_check_pow2(const rustsecp256k1_v0_10_0_modinv64_trans2x2 *t, unsigned int n, int abs) { + rustsecp256k1_v0_10_0_int128 a; + rustsecp256k1_v0_10_0_i128_det(&a, t->u, t->v, t->q, t->r); + if (rustsecp256k1_v0_10_0_i128_check_pow2(&a, n, 1)) return 1; + if (abs && rustsecp256k1_v0_10_0_i128_check_pow2(&a, n, -1)) return 1; return 0; } #endif @@ -85,7 +85,7 @@ static int rustsecp256k1_v0_9_2_modinv64_det_check_pow2(const rustsecp256k1_v0_9 * to it to bring it to range [0,modulus).
If sign < 0, the input will also be negated in the * process. The input must have limbs in range (-2^62,2^62). The output will have limbs in range * [0,2^62). */ -static void rustsecp256k1_v0_9_2_modinv64_normalize_62(rustsecp256k1_v0_9_2_modinv64_signed62 *r, int64_t sign, const rustsecp256k1_v0_9_2_modinv64_modinfo *modinfo) { +static void rustsecp256k1_v0_10_0_modinv64_normalize_62(rustsecp256k1_v0_10_0_modinv64_signed62 *r, int64_t sign, const rustsecp256k1_v0_10_0_modinv64_modinfo *modinfo) { const int64_t M62 = (int64_t)(UINT64_MAX >> 2); int64_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4]; volatile int64_t cond_add, cond_negate; @@ -97,8 +97,8 @@ static void rustsecp256k1_v0_9_2_modinv64_normalize_62(rustsecp256k1_v0_9_2_modi VERIFY_CHECK(r->v[i] >= -M62); VERIFY_CHECK(r->v[i] <= M62); } - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ #endif /* In a first step, add the modulus if the input is negative, and then negate if requested. @@ -144,15 +144,13 @@ static void rustsecp256k1_v0_9_2_modinv64_normalize_62(rustsecp256k1_v0_9_2_modi r->v[3] = r3; r->v[4] = r4; -#ifdef VERIFY VERIFY_CHECK(r0 >> 62 == 0); VERIFY_CHECK(r1 >> 62 == 0); VERIFY_CHECK(r2 >> 62 == 0); VERIFY_CHECK(r3 >> 62 == 0); VERIFY_CHECK(r4 >> 62 == 0); - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ -#endif + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ } /* Compute the transition matrix and eta for 59 divsteps (where zeta=-(delta+1/2)). @@ -166,7 +164,7 @@ static void rustsecp256k1_v0_9_2_modinv64_normalize_62(rustsecp256k1_v0_9_2_modi * * Implements the divsteps_n_matrix function from the explanation. */ -static int64_t rustsecp256k1_v0_9_2_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_9_2_modinv64_trans2x2 *t) { +static int64_t rustsecp256k1_v0_10_0_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_10_0_modinv64_trans2x2 *t) { /* u,v,q,r are the elements of the transformation matrix being built up, * starting with the identity matrix times 8 (because the caller expects * a result scaled by 2^62). Semantically they are signed integers @@ -216,15 +214,15 @@ static int64_t rustsecp256k1_v0_9_2_modinv64_divsteps_59(int64_t zeta, uint64_t t->v = (int64_t)v; t->q = (int64_t)q; t->r = (int64_t)r; -#ifdef VERIFY + /* The determinant of t must be a power of two. This guarantees that multiplication with t * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which * will be divided out again). As each divstep's individual matrix has determinant 2, the * aggregate of 59 of them will have determinant 2^59. Multiplying with the initial * 8*identity (which has determinant 2^6) means the overall outputs has determinant * 2^65. 
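With compiler-native 128-bit integers, the determinant check being described has a very direct shape, sketched below; det_is_pow2 is hypothetical, and the library instead routes this through its own int128 wrappers (i128_det, i128_check_pow2) so the check also builds where __int128 is unavailable.

    #include <stdint.h>

    /* det(t) = u*r - v*q; verify it equals 2^n exactly. Assumes GCC/Clang
     * __int128. Illustration of what det_check_pow2 asserts. */
    static int det_is_pow2(int64_t u, int64_t v, int64_t q, int64_t r, unsigned int n) {
        __int128 det = (__int128)u * r - (__int128)v * q;
        return det == ((__int128)1 << n);
    }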
*/ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_det_check_pow2(t, 65, 0)); -#endif + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_det_check_pow2(t, 65, 0)); + return zeta; } @@ -238,8 +236,8 @@ static int64_t rustsecp256k1_v0_9_2_modinv64_divsteps_59(int64_t zeta, uint64_t * * Implements the divsteps_n_matrix_var function from the explanation. */ -static int64_t rustsecp256k1_v0_9_2_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_9_2_modinv64_trans2x2 *t) { - /* Transformation matrix; see comments in rustsecp256k1_v0_9_2_modinv64_divsteps_62. */ +static int64_t rustsecp256k1_v0_10_0_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_10_0_modinv64_trans2x2 *t) { + /* Transformation matrix; see comments in rustsecp256k1_v0_10_0_modinv64_divsteps_62. */ uint64_t u = 1, v = 0, q = 0, r = 1; uint64_t f = f0, g = g0, m; uint32_t w; @@ -247,7 +245,7 @@ static int64_t rustsecp256k1_v0_9_2_modinv64_divsteps_62_var(int64_t eta, uint64 for (;;) { /* Use a sentinel bit to count zeros only up to i. */ - zeros = rustsecp256k1_v0_9_2_ctz64_var(g | (UINT64_MAX << i)); + zeros = rustsecp256k1_v0_10_0_ctz64_var(g | (UINT64_MAX << i)); /* Perform zeros divsteps at once; they all just divide g by two. */ g >>= zeros; u <<= zeros; @@ -301,13 +299,13 @@ static int64_t rustsecp256k1_v0_9_2_modinv64_divsteps_62_var(int64_t eta, uint64 t->v = (int64_t)v; t->q = (int64_t)q; t->r = (int64_t)r; -#ifdef VERIFY + /* The determinant of t must be a power of two. This guarantees that multiplication with t * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which * will be divided out again). As each divstep's individual matrix has determinant 2, the * aggregate of 62 of them will have determinant 2^62. */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_det_check_pow2(t, 62, 0)); -#endif + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_det_check_pow2(t, 62, 0)); + return eta; } @@ -324,8 +322,8 @@ static int64_t rustsecp256k1_v0_9_2_modinv64_divsteps_62_var(int64_t eta, uint64 * change, but are meaningless. * Return: final eta */ -static int64_t rustsecp256k1_v0_9_2_modinv64_posdivsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_9_2_modinv64_trans2x2 *t, int *jacp) { - /* Transformation matrix; see comments in rustsecp256k1_v0_9_2_modinv64_divsteps_62. */ +static int64_t rustsecp256k1_v0_10_0_modinv64_posdivsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_10_0_modinv64_trans2x2 *t, int *jacp) { + /* Transformation matrix; see comments in rustsecp256k1_v0_10_0_modinv64_divsteps_62. */ uint64_t u = 1, v = 0, q = 0, r = 1; uint64_t f = f0, g = g0, m; uint32_t w; @@ -334,7 +332,7 @@ static int64_t rustsecp256k1_v0_9_2_modinv64_posdivsteps_62_var(int64_t eta, uin for (;;) { /* Use a sentinel bit to count zeros only up to i. */ - zeros = rustsecp256k1_v0_9_2_ctz64_var(g | (UINT64_MAX << i)); + zeros = rustsecp256k1_v0_10_0_ctz64_var(g | (UINT64_MAX << i)); /* Perform zeros divsteps at once; they all just divide g by two. */ g >>= zeros; u <<= zeros; @@ -392,13 +390,13 @@ static int64_t rustsecp256k1_v0_9_2_modinv64_posdivsteps_62_var(int64_t eta, uin t->v = (int64_t)v; t->q = (int64_t)q; t->r = (int64_t)r; -#ifdef VERIFY + /* The determinant of t must be a power of two. This guarantees that multiplication with t * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which * will be divided out again). 
As each divstep's individual matrix has determinant 2 or -2, * the aggregate of 62 of them will have determinant 2^62 or -2^62. */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_det_check_pow2(t, 62, 1)); -#endif + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_det_check_pow2(t, 62, 1)); + *jacp = jac; return eta; } @@ -410,142 +408,140 @@ static int64_t rustsecp256k1_v0_9_2_modinv64_posdivsteps_62_var(int64_t eta, uin * * This implements the update_de function from the explanation. */ -static void rustsecp256k1_v0_9_2_modinv64_update_de_62(rustsecp256k1_v0_9_2_modinv64_signed62 *d, rustsecp256k1_v0_9_2_modinv64_signed62 *e, const rustsecp256k1_v0_9_2_modinv64_trans2x2 *t, const rustsecp256k1_v0_9_2_modinv64_modinfo* modinfo) { +static void rustsecp256k1_v0_10_0_modinv64_update_de_62(rustsecp256k1_v0_10_0_modinv64_signed62 *d, rustsecp256k1_v0_10_0_modinv64_signed62 *e, const rustsecp256k1_v0_10_0_modinv64_trans2x2 *t, const rustsecp256k1_v0_10_0_modinv64_modinfo* modinfo) { const uint64_t M62 = UINT64_MAX >> 2; const int64_t d0 = d->v[0], d1 = d->v[1], d2 = d->v[2], d3 = d->v[3], d4 = d->v[4]; const int64_t e0 = e->v[0], e1 = e->v[1], e2 = e->v[2], e3 = e->v[3], e4 = e->v[4]; const int64_t u = t->u, v = t->v, q = t->q, r = t->r; int64_t md, me, sd, se; - rustsecp256k1_v0_9_2_int128 cd, ce; -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_abs(u) <= (((int64_t)1 << 62) - rustsecp256k1_v0_9_2_modinv64_abs(v))); /* |u|+|v| <= 2^62 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_abs(q) <= (((int64_t)1 << 62) - rustsecp256k1_v0_9_2_modinv64_abs(r))); /* |q|+|r| <= 2^62 */ -#endif + rustsecp256k1_v0_10_0_int128 cd, ce; + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_abs(u) <= (((int64_t)1 << 62) - rustsecp256k1_v0_10_0_modinv64_abs(v))); /* |u|+|v| <= 2^62 */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_abs(q) <= (((int64_t)1 << 62) - rustsecp256k1_v0_10_0_modinv64_abs(r))); /* |q|+|r| <= 2^62 */ + /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */ sd = d4 >> 63; se = e4 >> 63; md = (u & sd) + (v & se); me = (q & sd) + (r & se); /* Begin computing t*[d,e]. */ - rustsecp256k1_v0_9_2_i128_mul(&cd, u, d0); - rustsecp256k1_v0_9_2_i128_accum_mul(&cd, v, e0); - rustsecp256k1_v0_9_2_i128_mul(&ce, q, d0); - rustsecp256k1_v0_9_2_i128_accum_mul(&ce, r, e0); + rustsecp256k1_v0_10_0_i128_mul(&cd, u, d0); + rustsecp256k1_v0_10_0_i128_accum_mul(&cd, v, e0); + rustsecp256k1_v0_10_0_i128_mul(&ce, q, d0); + rustsecp256k1_v0_10_0_i128_accum_mul(&ce, r, e0); /* Correct md,me so that t*[d,e]+modulus*[md,me] has 62 zero bottom bits. 
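The md/me correction computed at this point is a Montgomery-style reduction trick: choosing md = -(cd * modulus^-1) mod 2^62 makes the low 62 bits of cd + md*modulus vanish, so the later right shift is an exact division by 2^62. A single-limb sketch follows; update_de_1limb is hypothetical, uses native __int128, assumes the result fits one limb, and omits the sign corrections the real code folds into md/me to keep d and e in range.

    #include <stdint.h>

    static void update_de_1limb(int64_t *d, int64_t *e,
                                int64_t u, int64_t v, int64_t q, int64_t r,
                                int64_t modulus, uint64_t modulus_inv62) {
        const uint64_t M62 = UINT64_MAX >> 2;
        __int128 cd = (__int128)u * *d + (__int128)v * *e;
        __int128 ce = (__int128)q * *d + (__int128)r * *e;
        /* Pick md (resp. me) so that cd + md*modulus is 0 mod 2^62. */
        int64_t md = -(int64_t)((modulus_inv62 * (uint64_t)cd) & M62);
        int64_t me = -(int64_t)((modulus_inv62 * (uint64_t)ce) & M62);
        cd += (__int128)modulus * md;
        ce += (__int128)modulus * me;
        /* The low 62 bits are now zero, so the shift divides exactly. */
        *d = (int64_t)(cd >> 62);
        *e = (int64_t)(ce >> 62);
    }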
*/ - md -= (modinfo->modulus_inv62 * rustsecp256k1_v0_9_2_i128_to_u64(&cd) + md) & M62; - me -= (modinfo->modulus_inv62 * rustsecp256k1_v0_9_2_i128_to_u64(&ce) + me) & M62; + md -= (modinfo->modulus_inv62 * rustsecp256k1_v0_10_0_i128_to_u64(&cd) + md) & M62; + me -= (modinfo->modulus_inv62 * rustsecp256k1_v0_10_0_i128_to_u64(&ce) + me) & M62; /* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */ - rustsecp256k1_v0_9_2_i128_accum_mul(&cd, modinfo->modulus.v[0], md); - rustsecp256k1_v0_9_2_i128_accum_mul(&ce, modinfo->modulus.v[0], me); + rustsecp256k1_v0_10_0_i128_accum_mul(&cd, modinfo->modulus.v[0], md); + rustsecp256k1_v0_10_0_i128_accum_mul(&ce, modinfo->modulus.v[0], me); /* Verify that the low 62 bits of the computation are indeed zero, and then throw them away. */ - VERIFY_CHECK((rustsecp256k1_v0_9_2_i128_to_u64(&cd) & M62) == 0); rustsecp256k1_v0_9_2_i128_rshift(&cd, 62); - VERIFY_CHECK((rustsecp256k1_v0_9_2_i128_to_u64(&ce) & M62) == 0); rustsecp256k1_v0_9_2_i128_rshift(&ce, 62); + VERIFY_CHECK((rustsecp256k1_v0_10_0_i128_to_u64(&cd) & M62) == 0); rustsecp256k1_v0_10_0_i128_rshift(&cd, 62); + VERIFY_CHECK((rustsecp256k1_v0_10_0_i128_to_u64(&ce) & M62) == 0); rustsecp256k1_v0_10_0_i128_rshift(&ce, 62); /* Compute limb 1 of t*[d,e]+modulus*[md,me], and store it as output limb 0 (= down shift). */ - rustsecp256k1_v0_9_2_i128_accum_mul(&cd, u, d1); - rustsecp256k1_v0_9_2_i128_accum_mul(&cd, v, e1); - rustsecp256k1_v0_9_2_i128_accum_mul(&ce, q, d1); - rustsecp256k1_v0_9_2_i128_accum_mul(&ce, r, e1); + rustsecp256k1_v0_10_0_i128_accum_mul(&cd, u, d1); + rustsecp256k1_v0_10_0_i128_accum_mul(&cd, v, e1); + rustsecp256k1_v0_10_0_i128_accum_mul(&ce, q, d1); + rustsecp256k1_v0_10_0_i128_accum_mul(&ce, r, e1); if (modinfo->modulus.v[1]) { /* Optimize for the case where limb of modulus is zero. */ - rustsecp256k1_v0_9_2_i128_accum_mul(&cd, modinfo->modulus.v[1], md); - rustsecp256k1_v0_9_2_i128_accum_mul(&ce, modinfo->modulus.v[1], me); + rustsecp256k1_v0_10_0_i128_accum_mul(&cd, modinfo->modulus.v[1], md); + rustsecp256k1_v0_10_0_i128_accum_mul(&ce, modinfo->modulus.v[1], me); } - d->v[0] = rustsecp256k1_v0_9_2_i128_to_u64(&cd) & M62; rustsecp256k1_v0_9_2_i128_rshift(&cd, 62); - e->v[0] = rustsecp256k1_v0_9_2_i128_to_u64(&ce) & M62; rustsecp256k1_v0_9_2_i128_rshift(&ce, 62); + d->v[0] = rustsecp256k1_v0_10_0_i128_to_u64(&cd) & M62; rustsecp256k1_v0_10_0_i128_rshift(&cd, 62); + e->v[0] = rustsecp256k1_v0_10_0_i128_to_u64(&ce) & M62; rustsecp256k1_v0_10_0_i128_rshift(&ce, 62); /* Compute limb 2 of t*[d,e]+modulus*[md,me], and store it as output limb 1. */ - rustsecp256k1_v0_9_2_i128_accum_mul(&cd, u, d2); - rustsecp256k1_v0_9_2_i128_accum_mul(&cd, v, e2); - rustsecp256k1_v0_9_2_i128_accum_mul(&ce, q, d2); - rustsecp256k1_v0_9_2_i128_accum_mul(&ce, r, e2); + rustsecp256k1_v0_10_0_i128_accum_mul(&cd, u, d2); + rustsecp256k1_v0_10_0_i128_accum_mul(&cd, v, e2); + rustsecp256k1_v0_10_0_i128_accum_mul(&ce, q, d2); + rustsecp256k1_v0_10_0_i128_accum_mul(&ce, r, e2); if (modinfo->modulus.v[2]) { /* Optimize for the case where limb of modulus is zero. 
*/ - rustsecp256k1_v0_9_2_i128_accum_mul(&cd, modinfo->modulus.v[2], md); - rustsecp256k1_v0_9_2_i128_accum_mul(&ce, modinfo->modulus.v[2], me); + rustsecp256k1_v0_10_0_i128_accum_mul(&cd, modinfo->modulus.v[2], md); + rustsecp256k1_v0_10_0_i128_accum_mul(&ce, modinfo->modulus.v[2], me); } - d->v[1] = rustsecp256k1_v0_9_2_i128_to_u64(&cd) & M62; rustsecp256k1_v0_9_2_i128_rshift(&cd, 62); - e->v[1] = rustsecp256k1_v0_9_2_i128_to_u64(&ce) & M62; rustsecp256k1_v0_9_2_i128_rshift(&ce, 62); + d->v[1] = rustsecp256k1_v0_10_0_i128_to_u64(&cd) & M62; rustsecp256k1_v0_10_0_i128_rshift(&cd, 62); + e->v[1] = rustsecp256k1_v0_10_0_i128_to_u64(&ce) & M62; rustsecp256k1_v0_10_0_i128_rshift(&ce, 62); /* Compute limb 3 of t*[d,e]+modulus*[md,me], and store it as output limb 2. */ - rustsecp256k1_v0_9_2_i128_accum_mul(&cd, u, d3); - rustsecp256k1_v0_9_2_i128_accum_mul(&cd, v, e3); - rustsecp256k1_v0_9_2_i128_accum_mul(&ce, q, d3); - rustsecp256k1_v0_9_2_i128_accum_mul(&ce, r, e3); + rustsecp256k1_v0_10_0_i128_accum_mul(&cd, u, d3); + rustsecp256k1_v0_10_0_i128_accum_mul(&cd, v, e3); + rustsecp256k1_v0_10_0_i128_accum_mul(&ce, q, d3); + rustsecp256k1_v0_10_0_i128_accum_mul(&ce, r, e3); if (modinfo->modulus.v[3]) { /* Optimize for the case where limb of modulus is zero. */ - rustsecp256k1_v0_9_2_i128_accum_mul(&cd, modinfo->modulus.v[3], md); - rustsecp256k1_v0_9_2_i128_accum_mul(&ce, modinfo->modulus.v[3], me); + rustsecp256k1_v0_10_0_i128_accum_mul(&cd, modinfo->modulus.v[3], md); + rustsecp256k1_v0_10_0_i128_accum_mul(&ce, modinfo->modulus.v[3], me); } - d->v[2] = rustsecp256k1_v0_9_2_i128_to_u64(&cd) & M62; rustsecp256k1_v0_9_2_i128_rshift(&cd, 62); - e->v[2] = rustsecp256k1_v0_9_2_i128_to_u64(&ce) & M62; rustsecp256k1_v0_9_2_i128_rshift(&ce, 62); + d->v[2] = rustsecp256k1_v0_10_0_i128_to_u64(&cd) & M62; rustsecp256k1_v0_10_0_i128_rshift(&cd, 62); + e->v[2] = rustsecp256k1_v0_10_0_i128_to_u64(&ce) & M62; rustsecp256k1_v0_10_0_i128_rshift(&ce, 62); /* Compute limb 4 of t*[d,e]+modulus*[md,me], and store it as output limb 3. */ - rustsecp256k1_v0_9_2_i128_accum_mul(&cd, u, d4); - rustsecp256k1_v0_9_2_i128_accum_mul(&cd, v, e4); - rustsecp256k1_v0_9_2_i128_accum_mul(&ce, q, d4); - rustsecp256k1_v0_9_2_i128_accum_mul(&ce, r, e4); - rustsecp256k1_v0_9_2_i128_accum_mul(&cd, modinfo->modulus.v[4], md); - rustsecp256k1_v0_9_2_i128_accum_mul(&ce, modinfo->modulus.v[4], me); - d->v[3] = rustsecp256k1_v0_9_2_i128_to_u64(&cd) & M62; rustsecp256k1_v0_9_2_i128_rshift(&cd, 62); - e->v[3] = rustsecp256k1_v0_9_2_i128_to_u64(&ce) & M62; rustsecp256k1_v0_9_2_i128_rshift(&ce, 62); + rustsecp256k1_v0_10_0_i128_accum_mul(&cd, u, d4); + rustsecp256k1_v0_10_0_i128_accum_mul(&cd, v, e4); + rustsecp256k1_v0_10_0_i128_accum_mul(&ce, q, d4); + rustsecp256k1_v0_10_0_i128_accum_mul(&ce, r, e4); + rustsecp256k1_v0_10_0_i128_accum_mul(&cd, modinfo->modulus.v[4], md); + rustsecp256k1_v0_10_0_i128_accum_mul(&ce, modinfo->modulus.v[4], me); + d->v[3] = rustsecp256k1_v0_10_0_i128_to_u64(&cd) & M62; rustsecp256k1_v0_10_0_i128_rshift(&cd, 62); + e->v[3] = rustsecp256k1_v0_10_0_i128_to_u64(&ce) & M62; rustsecp256k1_v0_10_0_i128_rshift(&ce, 62); /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. 
*/ - d->v[4] = rustsecp256k1_v0_9_2_i128_to_i64(&cd); - e->v[4] = rustsecp256k1_v0_9_2_i128_to_i64(&ce); -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */ -#endif + d->v[4] = rustsecp256k1_v0_10_0_i128_to_i64(&cd); + e->v[4] = rustsecp256k1_v0_10_0_i128_to_i64(&ce); + + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */ } /* Compute (t/2^62) * [f, g], where t is a transition matrix scaled by 2^62. * * This implements the update_fg function from the explanation. */ -static void rustsecp256k1_v0_9_2_modinv64_update_fg_62(rustsecp256k1_v0_9_2_modinv64_signed62 *f, rustsecp256k1_v0_9_2_modinv64_signed62 *g, const rustsecp256k1_v0_9_2_modinv64_trans2x2 *t) { +static void rustsecp256k1_v0_10_0_modinv64_update_fg_62(rustsecp256k1_v0_10_0_modinv64_signed62 *f, rustsecp256k1_v0_10_0_modinv64_signed62 *g, const rustsecp256k1_v0_10_0_modinv64_trans2x2 *t) { const uint64_t M62 = UINT64_MAX >> 2; const int64_t f0 = f->v[0], f1 = f->v[1], f2 = f->v[2], f3 = f->v[3], f4 = f->v[4]; const int64_t g0 = g->v[0], g1 = g->v[1], g2 = g->v[2], g3 = g->v[3], g4 = g->v[4]; const int64_t u = t->u, v = t->v, q = t->q, r = t->r; - rustsecp256k1_v0_9_2_int128 cf, cg; + rustsecp256k1_v0_10_0_int128 cf, cg; /* Start computing t*[f,g]. */ - rustsecp256k1_v0_9_2_i128_mul(&cf, u, f0); - rustsecp256k1_v0_9_2_i128_accum_mul(&cf, v, g0); - rustsecp256k1_v0_9_2_i128_mul(&cg, q, f0); - rustsecp256k1_v0_9_2_i128_accum_mul(&cg, r, g0); + rustsecp256k1_v0_10_0_i128_mul(&cf, u, f0); + rustsecp256k1_v0_10_0_i128_accum_mul(&cf, v, g0); + rustsecp256k1_v0_10_0_i128_mul(&cg, q, f0); + rustsecp256k1_v0_10_0_i128_accum_mul(&cg, r, g0); /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */ - VERIFY_CHECK((rustsecp256k1_v0_9_2_i128_to_u64(&cf) & M62) == 0); rustsecp256k1_v0_9_2_i128_rshift(&cf, 62); - VERIFY_CHECK((rustsecp256k1_v0_9_2_i128_to_u64(&cg) & M62) == 0); rustsecp256k1_v0_9_2_i128_rshift(&cg, 62); + VERIFY_CHECK((rustsecp256k1_v0_10_0_i128_to_u64(&cf) & M62) == 0); rustsecp256k1_v0_10_0_i128_rshift(&cf, 62); + VERIFY_CHECK((rustsecp256k1_v0_10_0_i128_to_u64(&cg) & M62) == 0); rustsecp256k1_v0_10_0_i128_rshift(&cg, 62); /* Compute limb 1 of t*[f,g], and store it as output limb 0 (= down shift). 
*/ - rustsecp256k1_v0_9_2_i128_accum_mul(&cf, u, f1); - rustsecp256k1_v0_9_2_i128_accum_mul(&cf, v, g1); - rustsecp256k1_v0_9_2_i128_accum_mul(&cg, q, f1); - rustsecp256k1_v0_9_2_i128_accum_mul(&cg, r, g1); - f->v[0] = rustsecp256k1_v0_9_2_i128_to_u64(&cf) & M62; rustsecp256k1_v0_9_2_i128_rshift(&cf, 62); - g->v[0] = rustsecp256k1_v0_9_2_i128_to_u64(&cg) & M62; rustsecp256k1_v0_9_2_i128_rshift(&cg, 62); + rustsecp256k1_v0_10_0_i128_accum_mul(&cf, u, f1); + rustsecp256k1_v0_10_0_i128_accum_mul(&cf, v, g1); + rustsecp256k1_v0_10_0_i128_accum_mul(&cg, q, f1); + rustsecp256k1_v0_10_0_i128_accum_mul(&cg, r, g1); + f->v[0] = rustsecp256k1_v0_10_0_i128_to_u64(&cf) & M62; rustsecp256k1_v0_10_0_i128_rshift(&cf, 62); + g->v[0] = rustsecp256k1_v0_10_0_i128_to_u64(&cg) & M62; rustsecp256k1_v0_10_0_i128_rshift(&cg, 62); /* Compute limb 2 of t*[f,g], and store it as output limb 1. */ - rustsecp256k1_v0_9_2_i128_accum_mul(&cf, u, f2); - rustsecp256k1_v0_9_2_i128_accum_mul(&cf, v, g2); - rustsecp256k1_v0_9_2_i128_accum_mul(&cg, q, f2); - rustsecp256k1_v0_9_2_i128_accum_mul(&cg, r, g2); - f->v[1] = rustsecp256k1_v0_9_2_i128_to_u64(&cf) & M62; rustsecp256k1_v0_9_2_i128_rshift(&cf, 62); - g->v[1] = rustsecp256k1_v0_9_2_i128_to_u64(&cg) & M62; rustsecp256k1_v0_9_2_i128_rshift(&cg, 62); + rustsecp256k1_v0_10_0_i128_accum_mul(&cf, u, f2); + rustsecp256k1_v0_10_0_i128_accum_mul(&cf, v, g2); + rustsecp256k1_v0_10_0_i128_accum_mul(&cg, q, f2); + rustsecp256k1_v0_10_0_i128_accum_mul(&cg, r, g2); + f->v[1] = rustsecp256k1_v0_10_0_i128_to_u64(&cf) & M62; rustsecp256k1_v0_10_0_i128_rshift(&cf, 62); + g->v[1] = rustsecp256k1_v0_10_0_i128_to_u64(&cg) & M62; rustsecp256k1_v0_10_0_i128_rshift(&cg, 62); /* Compute limb 3 of t*[f,g], and store it as output limb 2. */ - rustsecp256k1_v0_9_2_i128_accum_mul(&cf, u, f3); - rustsecp256k1_v0_9_2_i128_accum_mul(&cf, v, g3); - rustsecp256k1_v0_9_2_i128_accum_mul(&cg, q, f3); - rustsecp256k1_v0_9_2_i128_accum_mul(&cg, r, g3); - f->v[2] = rustsecp256k1_v0_9_2_i128_to_u64(&cf) & M62; rustsecp256k1_v0_9_2_i128_rshift(&cf, 62); - g->v[2] = rustsecp256k1_v0_9_2_i128_to_u64(&cg) & M62; rustsecp256k1_v0_9_2_i128_rshift(&cg, 62); + rustsecp256k1_v0_10_0_i128_accum_mul(&cf, u, f3); + rustsecp256k1_v0_10_0_i128_accum_mul(&cf, v, g3); + rustsecp256k1_v0_10_0_i128_accum_mul(&cg, q, f3); + rustsecp256k1_v0_10_0_i128_accum_mul(&cg, r, g3); + f->v[2] = rustsecp256k1_v0_10_0_i128_to_u64(&cf) & M62; rustsecp256k1_v0_10_0_i128_rshift(&cf, 62); + g->v[2] = rustsecp256k1_v0_10_0_i128_to_u64(&cg) & M62; rustsecp256k1_v0_10_0_i128_rshift(&cg, 62); /* Compute limb 4 of t*[f,g], and store it as output limb 3. */ - rustsecp256k1_v0_9_2_i128_accum_mul(&cf, u, f4); - rustsecp256k1_v0_9_2_i128_accum_mul(&cf, v, g4); - rustsecp256k1_v0_9_2_i128_accum_mul(&cg, q, f4); - rustsecp256k1_v0_9_2_i128_accum_mul(&cg, r, g4); - f->v[3] = rustsecp256k1_v0_9_2_i128_to_u64(&cf) & M62; rustsecp256k1_v0_9_2_i128_rshift(&cf, 62); - g->v[3] = rustsecp256k1_v0_9_2_i128_to_u64(&cg) & M62; rustsecp256k1_v0_9_2_i128_rshift(&cg, 62); + rustsecp256k1_v0_10_0_i128_accum_mul(&cf, u, f4); + rustsecp256k1_v0_10_0_i128_accum_mul(&cf, v, g4); + rustsecp256k1_v0_10_0_i128_accum_mul(&cg, q, f4); + rustsecp256k1_v0_10_0_i128_accum_mul(&cg, r, g4); + f->v[3] = rustsecp256k1_v0_10_0_i128_to_u64(&cf) & M62; rustsecp256k1_v0_10_0_i128_rshift(&cf, 62); + g->v[3] = rustsecp256k1_v0_10_0_i128_to_u64(&cg) & M62; rustsecp256k1_v0_10_0_i128_rshift(&cg, 62); /* What remains is limb 5 of t*[f,g]; store it as output limb 4. 
*/ - f->v[4] = rustsecp256k1_v0_9_2_i128_to_i64(&cf); - g->v[4] = rustsecp256k1_v0_9_2_i128_to_i64(&cg); + f->v[4] = rustsecp256k1_v0_10_0_i128_to_i64(&cf); + g->v[4] = rustsecp256k1_v0_10_0_i128_to_i64(&cg); } /* Compute (t/2^62) * [f, g], where t is a transition matrix for 62 divsteps. @@ -554,100 +550,97 @@ static void rustsecp256k1_v0_9_2_modinv64_update_fg_62(rustsecp256k1_v0_9_2_modi * * This implements the update_fg function from the explanation. */ -static void rustsecp256k1_v0_9_2_modinv64_update_fg_62_var(int len, rustsecp256k1_v0_9_2_modinv64_signed62 *f, rustsecp256k1_v0_9_2_modinv64_signed62 *g, const rustsecp256k1_v0_9_2_modinv64_trans2x2 *t) { +static void rustsecp256k1_v0_10_0_modinv64_update_fg_62_var(int len, rustsecp256k1_v0_10_0_modinv64_signed62 *f, rustsecp256k1_v0_10_0_modinv64_signed62 *g, const rustsecp256k1_v0_10_0_modinv64_trans2x2 *t) { const uint64_t M62 = UINT64_MAX >> 2; const int64_t u = t->u, v = t->v, q = t->q, r = t->r; int64_t fi, gi; - rustsecp256k1_v0_9_2_int128 cf, cg; + rustsecp256k1_v0_10_0_int128 cf, cg; int i; VERIFY_CHECK(len > 0); /* Start computing t*[f,g]. */ fi = f->v[0]; gi = g->v[0]; - rustsecp256k1_v0_9_2_i128_mul(&cf, u, fi); - rustsecp256k1_v0_9_2_i128_accum_mul(&cf, v, gi); - rustsecp256k1_v0_9_2_i128_mul(&cg, q, fi); - rustsecp256k1_v0_9_2_i128_accum_mul(&cg, r, gi); + rustsecp256k1_v0_10_0_i128_mul(&cf, u, fi); + rustsecp256k1_v0_10_0_i128_accum_mul(&cf, v, gi); + rustsecp256k1_v0_10_0_i128_mul(&cg, q, fi); + rustsecp256k1_v0_10_0_i128_accum_mul(&cg, r, gi); /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */ - VERIFY_CHECK((rustsecp256k1_v0_9_2_i128_to_u64(&cf) & M62) == 0); rustsecp256k1_v0_9_2_i128_rshift(&cf, 62); - VERIFY_CHECK((rustsecp256k1_v0_9_2_i128_to_u64(&cg) & M62) == 0); rustsecp256k1_v0_9_2_i128_rshift(&cg, 62); + VERIFY_CHECK((rustsecp256k1_v0_10_0_i128_to_u64(&cf) & M62) == 0); rustsecp256k1_v0_10_0_i128_rshift(&cf, 62); + VERIFY_CHECK((rustsecp256k1_v0_10_0_i128_to_u64(&cg) & M62) == 0); rustsecp256k1_v0_10_0_i128_rshift(&cg, 62); /* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting * down by 62 bits). */ for (i = 1; i < len; ++i) { fi = f->v[i]; gi = g->v[i]; - rustsecp256k1_v0_9_2_i128_accum_mul(&cf, u, fi); - rustsecp256k1_v0_9_2_i128_accum_mul(&cf, v, gi); - rustsecp256k1_v0_9_2_i128_accum_mul(&cg, q, fi); - rustsecp256k1_v0_9_2_i128_accum_mul(&cg, r, gi); - f->v[i - 1] = rustsecp256k1_v0_9_2_i128_to_u64(&cf) & M62; rustsecp256k1_v0_9_2_i128_rshift(&cf, 62); - g->v[i - 1] = rustsecp256k1_v0_9_2_i128_to_u64(&cg) & M62; rustsecp256k1_v0_9_2_i128_rshift(&cg, 62); + rustsecp256k1_v0_10_0_i128_accum_mul(&cf, u, fi); + rustsecp256k1_v0_10_0_i128_accum_mul(&cf, v, gi); + rustsecp256k1_v0_10_0_i128_accum_mul(&cg, q, fi); + rustsecp256k1_v0_10_0_i128_accum_mul(&cg, r, gi); + f->v[i - 1] = rustsecp256k1_v0_10_0_i128_to_u64(&cf) & M62; rustsecp256k1_v0_10_0_i128_rshift(&cf, 62); + g->v[i - 1] = rustsecp256k1_v0_10_0_i128_to_u64(&cg) & M62; rustsecp256k1_v0_10_0_i128_rshift(&cg, 62); } /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */ - f->v[len - 1] = rustsecp256k1_v0_9_2_i128_to_i64(&cf); - g->v[len - 1] = rustsecp256k1_v0_9_2_i128_to_i64(&cg); + f->v[len - 1] = rustsecp256k1_v0_10_0_i128_to_i64(&cf); + g->v[len - 1] = rustsecp256k1_v0_10_0_i128_to_i64(&cg); } /* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). 
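Before the two production entry points that follow, it can help to see the whole algorithm with the limb representation and batching stripped away. The reference below (half_mod and modinv_ref are illustrative: variable time, small inputs, m odd, 0 <= x < m) maintains the same invariants the vendored code relies on, d*x == f and e*x == g (mod m), so that when the loop ends with f = +/-1, d holds +/- the inverse.

    #include <stdint.h>

    /* a/2 mod m, for odd m and 0 <= a < m. */
    static int64_t half_mod(int64_t a, int64_t m) {
        return ((a & 1) ? a + m : a) / 2;
    }

    static int64_t modinv_ref(int64_t x, int64_t m) {
        int64_t delta = 1, f = m, g = x, d = 0, e = 1 % m;
        while (g != 0) {
            if (delta > 0 && (g & 1)) {
                int64_t old_f = f, old_d = d;  /* divstep swaps f and g */
                delta = 1 - delta;
                f = g; g = (g - old_f) / 2;
                d = e; e = half_mod(((e - old_d) % m + m) % m, m);
            } else if (g & 1) {
                delta = 1 + delta;
                g = (g + f) / 2;
                e = half_mod((e + d) % m, m);
            } else {
                delta = 1 + delta;
                g = g / 2;
                e = half_mod(e, m);
            }
        }
        if (f != 1 && f != -1) return 0;  /* gcd(x, m) != 1: no inverse */
        d = (d % m + m) % m;
        return f == 1 ? d : (m - d) % m;  /* negate d when f == -1 */
    }

The final sign fixup is exactly what the normalize_62 calls below perform, using the top limb of f as the sign.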
*/ -static void rustsecp256k1_v0_9_2_modinv64(rustsecp256k1_v0_9_2_modinv64_signed62 *x, const rustsecp256k1_v0_9_2_modinv64_modinfo *modinfo) { +static void rustsecp256k1_v0_10_0_modinv64(rustsecp256k1_v0_10_0_modinv64_signed62 *x, const rustsecp256k1_v0_10_0_modinv64_modinfo *modinfo) { /* Start with d=0, e=1, f=modulus, g=x, zeta=-1. */ - rustsecp256k1_v0_9_2_modinv64_signed62 d = {{0, 0, 0, 0, 0}}; - rustsecp256k1_v0_9_2_modinv64_signed62 e = {{1, 0, 0, 0, 0}}; - rustsecp256k1_v0_9_2_modinv64_signed62 f = modinfo->modulus; - rustsecp256k1_v0_9_2_modinv64_signed62 g = *x; + rustsecp256k1_v0_10_0_modinv64_signed62 d = {{0, 0, 0, 0, 0}}; + rustsecp256k1_v0_10_0_modinv64_signed62 e = {{1, 0, 0, 0, 0}}; + rustsecp256k1_v0_10_0_modinv64_signed62 f = modinfo->modulus; + rustsecp256k1_v0_10_0_modinv64_signed62 g = *x; int i; int64_t zeta = -1; /* zeta = -(delta+1/2); delta starts at 1/2. */ /* Do 10 iterations of 59 divsteps each = 590 divsteps. This suffices for 256-bit inputs. */ for (i = 0; i < 10; ++i) { /* Compute transition matrix and new zeta after 59 divsteps. */ - rustsecp256k1_v0_9_2_modinv64_trans2x2 t; - zeta = rustsecp256k1_v0_9_2_modinv64_divsteps_59(zeta, f.v[0], g.v[0], &t); + rustsecp256k1_v0_10_0_modinv64_trans2x2 t; + zeta = rustsecp256k1_v0_10_0_modinv64_divsteps_59(zeta, f.v[0], g.v[0], &t); /* Update d,e using that transition matrix. */ - rustsecp256k1_v0_9_2_modinv64_update_de_62(&d, &e, &t, modinfo); + rustsecp256k1_v0_10_0_modinv64_update_de_62(&d, &e, &t, modinfo); /* Update f,g using that transition matrix. */ -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */ -#endif - rustsecp256k1_v0_9_2_modinv64_update_fg_62(&f, &g, &t); -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */ -#endif + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */ + + rustsecp256k1_v0_10_0_modinv64_update_fg_62(&f, &g, &t); + + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */ } /* At this point sufficient iterations have been 
performed that g must have reached 0 * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g * values i.e. +/- 1, and d now contains +/- the modular inverse. */ -#ifdef VERIFY + /* g == 0 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0); + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0); /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 || - rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 || - (rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && - rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && - (rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 || - rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0))); -#endif + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 || + rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 || + (rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + (rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 || + rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0))); /* Optionally negate d, normalize to [0,modulus), and return it. */ - rustsecp256k1_v0_9_2_modinv64_normalize_62(&d, f.v[4], modinfo); + rustsecp256k1_v0_10_0_modinv64_normalize_62(&d, f.v[4], modinfo); *x = d; } /* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */ -static void rustsecp256k1_v0_9_2_modinv64_var(rustsecp256k1_v0_9_2_modinv64_signed62 *x, const rustsecp256k1_v0_9_2_modinv64_modinfo *modinfo) { +static void rustsecp256k1_v0_10_0_modinv64_var(rustsecp256k1_v0_10_0_modinv64_signed62 *x, const rustsecp256k1_v0_10_0_modinv64_modinfo *modinfo) { /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */ - rustsecp256k1_v0_9_2_modinv64_signed62 d = {{0, 0, 0, 0, 0}}; - rustsecp256k1_v0_9_2_modinv64_signed62 e = {{1, 0, 0, 0, 0}}; - rustsecp256k1_v0_9_2_modinv64_signed62 f = modinfo->modulus; - rustsecp256k1_v0_9_2_modinv64_signed62 g = *x; + rustsecp256k1_v0_10_0_modinv64_signed62 d = {{0, 0, 0, 0, 0}}; + rustsecp256k1_v0_10_0_modinv64_signed62 e = {{1, 0, 0, 0, 0}}; + rustsecp256k1_v0_10_0_modinv64_signed62 f = modinfo->modulus; + rustsecp256k1_v0_10_0_modinv64_signed62 g = *x; #ifdef VERIFY int i = 0; #endif @@ -658,18 +651,17 @@ static void rustsecp256k1_v0_9_2_modinv64_var(rustsecp256k1_v0_9_2_modinv64_sign /* Do iterations of 62 divsteps each until g=0. */ while (1) { /* Compute transition matrix and new eta after 62 divsteps. */ - rustsecp256k1_v0_9_2_modinv64_trans2x2 t; - eta = rustsecp256k1_v0_9_2_modinv64_divsteps_62_var(eta, f.v[0], g.v[0], &t); + rustsecp256k1_v0_10_0_modinv64_trans2x2 t; + eta = rustsecp256k1_v0_10_0_modinv64_divsteps_62_var(eta, f.v[0], g.v[0], &t); /* Update d,e using that transition matrix. */ - rustsecp256k1_v0_9_2_modinv64_update_de_62(&d, &e, &t, modinfo); + rustsecp256k1_v0_10_0_modinv64_update_de_62(&d, &e, &t, modinfo); /* Update f,g using that transition matrix. 
*/ -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ -#endif - rustsecp256k1_v0_9_2_modinv64_update_fg_62_var(len, &f, &g, &t); + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ + + rustsecp256k1_v0_10_0_modinv64_update_fg_62_var(len, &f, &g, &t); /* If the bottom limb of g is zero, there is a chance that g=0. */ if (g.v[0] == 0) { cond = 0; @@ -693,31 +685,29 @@ static void rustsecp256k1_v0_9_2_modinv64_var(rustsecp256k1_v0_9_2_modinv64_sign g.v[len - 2] |= (uint64_t)gn << 62; --len; } -#ifdef VERIFY + VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ -#endif + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ } /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. 
*/ -#ifdef VERIFY + /* g == 0 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0); + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0); /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 || - rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 || - (rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && - rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && - (rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 || - rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0))); -#endif + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 || + rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 || + (rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + (rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 || + rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0))); /* Optionally negate d, normalize to [0,modulus), and return it. */ - rustsecp256k1_v0_9_2_modinv64_normalize_62(&d, f.v[len - 1], modinfo); + rustsecp256k1_v0_10_0_modinv64_normalize_62(&d, f.v[len - 1], modinfo); *x = d; } @@ -730,10 +720,10 @@ static void rustsecp256k1_v0_9_2_modinv64_var(rustsecp256k1_v0_9_2_modinv64_sign #endif /* Compute the Jacobi symbol of x modulo modinfo->modulus (variable time). gcd(x,modulus) must be 1. */ -static int rustsecp256k1_v0_9_2_jacobi64_maybe_var(const rustsecp256k1_v0_9_2_modinv64_signed62 *x, const rustsecp256k1_v0_9_2_modinv64_modinfo *modinfo) { +static int rustsecp256k1_v0_10_0_jacobi64_maybe_var(const rustsecp256k1_v0_10_0_modinv64_signed62 *x, const rustsecp256k1_v0_10_0_modinv64_modinfo *modinfo) { /* Start with f=modulus, g=x, eta=-1. */ - rustsecp256k1_v0_9_2_modinv64_signed62 f = modinfo->modulus; - rustsecp256k1_v0_9_2_modinv64_signed62 g = *x; + rustsecp256k1_v0_10_0_modinv64_signed62 f = modinfo->modulus; + rustsecp256k1_v0_10_0_modinv64_signed62 g = *x; int j, len = 5; int64_t eta = -1; /* eta = -delta; delta is initially 1 */ int64_t cond, fn, gn; @@ -750,16 +740,15 @@ static int rustsecp256k1_v0_9_2_jacobi64_maybe_var(const rustsecp256k1_v0_9_2_mo for (count = 0; count < JACOBI64_ITERATIONS; ++count) { /* Compute transition matrix and new eta after 62 posdivsteps. */ - rustsecp256k1_v0_9_2_modinv64_trans2x2 t; - eta = rustsecp256k1_v0_9_2_modinv64_posdivsteps_62_var(eta, f.v[0] | ((uint64_t)f.v[1] << 62), g.v[0] | ((uint64_t)g.v[1] << 62), &t, &jac); + rustsecp256k1_v0_10_0_modinv64_trans2x2 t; + eta = rustsecp256k1_v0_10_0_modinv64_posdivsteps_62_var(eta, f.v[0] | ((uint64_t)f.v[1] << 62), g.v[0] | ((uint64_t)g.v[1] << 62), &t, &jac); /* Update f,g using that transition matrix. 
*/ -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ -#endif - rustsecp256k1_v0_9_2_modinv64_update_fg_62_var(len, &f, &g, &t); + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ + + rustsecp256k1_v0_10_0_modinv64_update_fg_62_var(len, &f, &g, &t); /* If the bottom limb of f is 1, there is a chance that f=1. */ if (f.v[0] == 1) { cond = 0; @@ -779,12 +768,11 @@ static int rustsecp256k1_v0_9_2_jacobi64_maybe_var(const rustsecp256k1_v0_9_2_mo cond |= gn; /* If so, reduce length. */ if (cond == 0) --len; -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */ - VERIFY_CHECK(rustsecp256k1_v0_9_2_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ -#endif + + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */ + VERIFY_CHECK(rustsecp256k1_v0_10_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ } /* The loop failed to converge to f=g after 1550 iterations. Return 0, indicating unknown result. 
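For contrast with the divstep-based jacobi64_maybe_var above, which deliberately gives up (returning 0) once its fixed iteration budget is exhausted, the textbook variable-time Jacobi symbol computation looks like the sketch below; jacobi_ref is illustrative and not part of the library.

    #include <stdint.h>

    /* Jacobi symbol (a|n) for odd n > 0, by the classic binary algorithm:
     * strip factors of 2 (flipping the sign when n is 3 or 5 mod 8), then
     * apply quadratic reciprocity and reduce. Returns 0 when gcd(a,n) != 1. */
    static int jacobi_ref(uint64_t a, uint64_t n) {
        int s = 1;
        a %= n;
        while (a != 0) {
            while ((a & 1) == 0) {
                a >>= 1;
                if ((n & 7) == 3 || (n & 7) == 5) s = -s;  /* (2|n) rule */
            }
            { uint64_t t = a; a = n; n = t; }  /* reciprocity: swap a and n */
            if ((a & 3) == 3 && (n & 3) == 3) s = -s;
            a %= n;
        }
        return n == 1 ? s : 0;
    }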
*/ diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include index e1d4eab2c..bc75c44e8 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include +++ b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include @@ -1,4 +1,4 @@ -include_HEADERS += include/rustsecp256k1_v0_9_2_ecdh.h +include_HEADERS += include/rustsecp256k1_v0_10_0_ecdh.h noinst_HEADERS += src/modules/ecdh/main_impl.h noinst_HEADERS += src/modules/ecdh/tests_impl.h noinst_HEADERS += src/modules/ecdh/bench_impl.h diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/bench_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/bench_impl.h index 03a713b6f..62454b3d0 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/bench_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/bench_impl.h @@ -10,8 +10,8 @@ #include "../../../include/secp256k1_ecdh.h" typedef struct { - rustsecp256k1_v0_9_2_context *ctx; - rustsecp256k1_v0_9_2_pubkey point; + rustsecp256k1_v0_10_0_context *ctx; + rustsecp256k1_v0_10_0_pubkey point; unsigned char scalar[32]; } bench_ecdh_data; @@ -29,7 +29,7 @@ static void bench_ecdh_setup(void* arg) { for (i = 0; i < 32; i++) { data->scalar[i] = i + 1; } - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1); } static void bench_ecdh(void* arg, int iters) { @@ -38,7 +38,7 @@ static void bench_ecdh(void* arg, int iters) { bench_ecdh_data *data = (bench_ecdh_data*)arg; for (i = 0; i < iters; i++) { - CHECK(rustsecp256k1_v0_9_2_ecdh(data->ctx, res, &data->point, data->scalar, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdh(data->ctx, res, &data->point, data->scalar, NULL, NULL) == 1); } } @@ -47,11 +47,11 @@ static void run_ecdh_bench(int iters, int argc, char** argv) { int d = argc == 1; /* create a context with no capabilities */ - data.ctx = rustsecp256k1_v0_9_2_context_create(SECP256K1_FLAGS_TYPE_CONTEXT); + data.ctx = rustsecp256k1_v0_10_0_context_create(SECP256K1_FLAGS_TYPE_CONTEXT); if (d || have_flag(argc, argv, "ecdh")) run_benchmark("ecdh", bench_ecdh, bench_ecdh_setup, NULL, &data, 10, iters); - rustsecp256k1_v0_9_2_context_destroy(data.ctx); + rustsecp256k1_v0_10_0_context_destroy(data.ctx); } #endif /* SECP256K1_MODULE_ECDH_BENCH_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h index 0386fc1b1..ff02af60c 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h @@ -12,26 +12,26 @@ static int ecdh_hash_function_sha256(unsigned char *output, const unsigned char *x32, const unsigned char *y32, void *data) { unsigned char version = (y32[31] & 0x01) | 0x02; - rustsecp256k1_v0_9_2_sha256 sha; + rustsecp256k1_v0_10_0_sha256 sha; (void)data; - rustsecp256k1_v0_9_2_sha256_initialize(&sha); - rustsecp256k1_v0_9_2_sha256_write(&sha, &version, 1); - rustsecp256k1_v0_9_2_sha256_write(&sha, x32, 32); - rustsecp256k1_v0_9_2_sha256_finalize(&sha, output); + rustsecp256k1_v0_10_0_sha256_initialize(&sha); + rustsecp256k1_v0_10_0_sha256_write(&sha, &version, 1); + rustsecp256k1_v0_10_0_sha256_write(&sha, x32, 32); + rustsecp256k1_v0_10_0_sha256_finalize(&sha, output); return 1; } -const rustsecp256k1_v0_9_2_ecdh_hash_function 
rustsecp256k1_v0_9_2_ecdh_hash_function_sha256 = ecdh_hash_function_sha256; -const rustsecp256k1_v0_9_2_ecdh_hash_function rustsecp256k1_v0_9_2_ecdh_hash_function_default = ecdh_hash_function_sha256; +const rustsecp256k1_v0_10_0_ecdh_hash_function rustsecp256k1_v0_10_0_ecdh_hash_function_sha256 = ecdh_hash_function_sha256; +const rustsecp256k1_v0_10_0_ecdh_hash_function rustsecp256k1_v0_10_0_ecdh_hash_function_default = ecdh_hash_function_sha256; -int rustsecp256k1_v0_9_2_ecdh(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *output, const rustsecp256k1_v0_9_2_pubkey *point, const unsigned char *scalar, rustsecp256k1_v0_9_2_ecdh_hash_function hashfp, void *data) { +int rustsecp256k1_v0_10_0_ecdh(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *output, const rustsecp256k1_v0_10_0_pubkey *point, const unsigned char *scalar, rustsecp256k1_v0_10_0_ecdh_hash_function hashfp, void *data) { int ret = 0; int overflow = 0; - rustsecp256k1_v0_9_2_gej res; - rustsecp256k1_v0_9_2_ge pt; - rustsecp256k1_v0_9_2_scalar s; + rustsecp256k1_v0_10_0_gej res; + rustsecp256k1_v0_10_0_ge pt; + rustsecp256k1_v0_10_0_scalar s; unsigned char x[32]; unsigned char y[32]; @@ -41,29 +41,29 @@ int rustsecp256k1_v0_9_2_ecdh(const rustsecp256k1_v0_9_2_context* ctx, unsigned ARG_CHECK(scalar != NULL); if (hashfp == NULL) { - hashfp = rustsecp256k1_v0_9_2_ecdh_hash_function_default; + hashfp = rustsecp256k1_v0_10_0_ecdh_hash_function_default; } - rustsecp256k1_v0_9_2_pubkey_load(ctx, &pt, point); - rustsecp256k1_v0_9_2_scalar_set_b32(&s, scalar, &overflow); + rustsecp256k1_v0_10_0_pubkey_load(ctx, &pt, point); + rustsecp256k1_v0_10_0_scalar_set_b32(&s, scalar, &overflow); - overflow |= rustsecp256k1_v0_9_2_scalar_is_zero(&s); - rustsecp256k1_v0_9_2_scalar_cmov(&s, &rustsecp256k1_v0_9_2_scalar_one, overflow); + overflow |= rustsecp256k1_v0_10_0_scalar_is_zero(&s); + rustsecp256k1_v0_10_0_scalar_cmov(&s, &rustsecp256k1_v0_10_0_scalar_one, overflow); - rustsecp256k1_v0_9_2_ecmult_const(&res, &pt, &s); - rustsecp256k1_v0_9_2_ge_set_gej(&pt, &res); + rustsecp256k1_v0_10_0_ecmult_const(&res, &pt, &s); + rustsecp256k1_v0_10_0_ge_set_gej(&pt, &res); /* Compute a hash of the point */ - rustsecp256k1_v0_9_2_fe_normalize(&pt.x); - rustsecp256k1_v0_9_2_fe_normalize(&pt.y); - rustsecp256k1_v0_9_2_fe_get_b32(x, &pt.x); - rustsecp256k1_v0_9_2_fe_get_b32(y, &pt.y); + rustsecp256k1_v0_10_0_fe_normalize(&pt.x); + rustsecp256k1_v0_10_0_fe_normalize(&pt.y); + rustsecp256k1_v0_10_0_fe_get_b32(x, &pt.x); + rustsecp256k1_v0_10_0_fe_get_b32(y, &pt.y); ret = hashfp(output, x, y, data); memset(x, 0, 32); memset(y, 0, 32); - rustsecp256k1_v0_9_2_scalar_clear(&s); + rustsecp256k1_v0_10_0_scalar_clear(&s); return !!ret & !overflow; } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h index cebe36f16..1524a079d 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h @@ -25,72 +25,59 @@ static int ecdh_hash_function_custom(unsigned char *output, const unsigned char } static void test_ecdh_api(void) { - /* Setup context that just counts errors */ - rustsecp256k1_v0_9_2_context *tctx = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE); - rustsecp256k1_v0_9_2_pubkey point; + rustsecp256k1_v0_10_0_pubkey point; unsigned char res[32]; unsigned char s_one[32] = { 0 }; - int32_t ecount = 0; s_one[31] = 1; - rustsecp256k1_v0_9_2_context_set_error_callback(tctx, 
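The body of rustsecp256k1_v0_10_0_ecdh above is deliberately branch-free: an invalid scalar is swapped for the constant 1 with scalar_cmov rather than rejected early, the point multiplication always runs, and the final return uses bitwise AND so failure handling never takes a secret-dependent branch. A minimal sketch of the same shape, with hypothetical helper names:

/* Branch-free failure handling, for shape only; parse_secret() and
 * compute_shared() are hypothetical stand-ins. */
static int parse_secret(const unsigned char *secret32);  /* 1 if zero/overflowing */
static int compute_shared(unsigned char *out32);         /* 1 on success */

static int ecdh_like(unsigned char *out32, const unsigned char *secret32) {
    int invalid = parse_secret(secret32); /* bad input is cmov'd to 1, not rejected */
    int ret = compute_shared(out32);      /* always executed */
    return !!ret & !invalid;              /* bitwise &: no short-circuit branch */
}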
counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_9_2_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(tctx, &point, s_one) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &point, s_one) == 1); /* Check all NULLs are detected */ - CHECK(rustsecp256k1_v0_9_2_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_ecdh(tctx, NULL, &point, s_one, NULL, NULL) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_ecdh(tctx, res, NULL, s_one, NULL, NULL) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_ecdh(tctx, res, &point, NULL, NULL, NULL) == 0); - CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_9_2_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1); - CHECK(ecount == 3); - - /* Cleanup */ - rustsecp256k1_v0_9_2_context_destroy(tctx); + CHECK(rustsecp256k1_v0_10_0_ecdh(CTX, res, &point, s_one, NULL, NULL) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdh(CTX, NULL, &point, s_one, NULL, NULL)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdh(CTX, res, NULL, s_one, NULL, NULL)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdh(CTX, res, &point, NULL, NULL, NULL)); + CHECK(rustsecp256k1_v0_10_0_ecdh(CTX, res, &point, s_one, NULL, NULL) == 1); } static void test_ecdh_generator_basepoint(void) { unsigned char s_one[32] = { 0 }; - rustsecp256k1_v0_9_2_pubkey point[2]; + rustsecp256k1_v0_10_0_pubkey point[2]; int i; s_one[31] = 1; /* Check against pubkey creation when the basepoint is the generator */ for (i = 0; i < 2 * COUNT; ++i) { - rustsecp256k1_v0_9_2_sha256 sha; + rustsecp256k1_v0_10_0_sha256 sha; unsigned char s_b32[32]; unsigned char output_ecdh[65]; unsigned char output_ser[32]; unsigned char point_ser[65]; size_t point_ser_len = sizeof(point_ser); - rustsecp256k1_v0_9_2_scalar s; + rustsecp256k1_v0_10_0_scalar s; random_scalar_order(&s); - rustsecp256k1_v0_9_2_scalar_get_b32(s_b32, &s); + rustsecp256k1_v0_10_0_scalar_get_b32(s_b32, &s); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &point[0], s_one) == 1); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &point[1], s_b32) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &point[0], s_one) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &point[1], s_b32) == 1); /* compute using ECDH function with custom hash function */ - CHECK(rustsecp256k1_v0_9_2_ecdh(CTX, output_ecdh, &point[0], s_b32, ecdh_hash_function_custom, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdh(CTX, output_ecdh, &point[0], s_b32, ecdh_hash_function_custom, NULL) == 1); /* compute "explicitly" */ - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(CTX, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1); /* compare */ - CHECK(rustsecp256k1_v0_9_2_memcmp_var(output_ecdh, point_ser, 65) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(output_ecdh, point_ser, 65) == 0); /* compute using ECDH function with default hash function */ - CHECK(rustsecp256k1_v0_9_2_ecdh(CTX, output_ecdh, &point[0], s_b32, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdh(CTX, output_ecdh, &point[0], s_b32, NULL, NULL) == 1); /* compute "explicitly" */ - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(CTX, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1); - rustsecp256k1_v0_9_2_sha256_initialize(&sha); - rustsecp256k1_v0_9_2_sha256_write(&sha, point_ser, 
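test_ecdh_api above is one of many tests rewritten from hand-rolled error counting (a per-test context, counting_illegal_callback_fn, and an ecount variable) to the shared CTX plus the CHECK_ILLEGAL macro. Roughly, the macro packages the old boilerplate: install a counting illegal-argument callback, evaluate the call, and require that the callback fired exactly once. A hedged sketch of the idiom (the real macro lives in the upstream test harness and may differ in detail):

#include <stdint.h>

static void counting_callback_fn(const char *str, void *data) {
    int32_t *count = (int32_t *)data;
    (void)str;
    (*count)++;
}

/* Sketch only: assert that expr returns 0 and raises exactly one
 * illegal-argument callback on ctx. */
#define CHECK_ILLEGAL_SKETCH(ctx, expr) do { \
    int32_t _calls = 0; \
    rustsecp256k1_v0_10_0_context_set_illegal_callback((ctx), counting_callback_fn, &_calls); \
    CHECK((expr) == 0); \
    CHECK(_calls == 1); \
    rustsecp256k1_v0_10_0_context_set_illegal_callback((ctx), NULL, NULL); \
} while (0)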
point_ser_len); - rustsecp256k1_v0_9_2_sha256_finalize(&sha, output_ser); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1); + rustsecp256k1_v0_10_0_sha256_initialize(&sha); + rustsecp256k1_v0_10_0_sha256_write(&sha, point_ser, point_ser_len); + rustsecp256k1_v0_10_0_sha256_finalize(&sha, output_ser); /* compare */ - CHECK(rustsecp256k1_v0_9_2_memcmp_var(output_ecdh, output_ser, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(output_ecdh, output_ser, 32) == 0); } } @@ -104,29 +91,29 @@ static void test_bad_scalar(void) { }; unsigned char s_rand[32] = { 0 }; unsigned char output[32]; - rustsecp256k1_v0_9_2_scalar rand; - rustsecp256k1_v0_9_2_pubkey point; + rustsecp256k1_v0_10_0_scalar rand; + rustsecp256k1_v0_10_0_pubkey point; /* Create random point */ random_scalar_order(&rand); - rustsecp256k1_v0_9_2_scalar_get_b32(s_rand, &rand); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &point, s_rand) == 1); + rustsecp256k1_v0_10_0_scalar_get_b32(s_rand, &rand); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &point, s_rand) == 1); /* Try to multiply it by bad values */ - CHECK(rustsecp256k1_v0_9_2_ecdh(CTX, output, &point, s_zero, NULL, NULL) == 0); - CHECK(rustsecp256k1_v0_9_2_ecdh(CTX, output, &point, s_overflow, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdh(CTX, output, &point, s_zero, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdh(CTX, output, &point, s_overflow, NULL, NULL) == 0); /* ...and a good one */ s_overflow[31] -= 1; - CHECK(rustsecp256k1_v0_9_2_ecdh(CTX, output, &point, s_overflow, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdh(CTX, output, &point, s_overflow, NULL, NULL) == 1); /* Hash function failure results in ecdh failure */ - CHECK(rustsecp256k1_v0_9_2_ecdh(CTX, output, &point, s_overflow, ecdh_hash_function_test_fail, NULL) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdh(CTX, output, &point, s_overflow, ecdh_hash_function_test_fail, NULL) == 0); } /** Test that ECDH(sG, 1/s) == ECDH((1/s)G, s) == ECDH(G, 1) for a few random s. 
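 *
 * Why the identity holds: ECDH(P, k) hashes the X coordinate of k*P, so
 * ECDH(sG, 1/s) = H((1/s)*(sG)) = H(G) = ECDH(G, 1), and symmetrically
 * ECDH((1/s)G, s) = H(s*((1/s)G)) = H(G).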
*/ static void test_result_basepoint(void) { - rustsecp256k1_v0_9_2_pubkey point; - rustsecp256k1_v0_9_2_scalar rand; + rustsecp256k1_v0_10_0_pubkey point; + rustsecp256k1_v0_10_0_scalar rand; unsigned char s[32]; unsigned char s_inv[32]; unsigned char out[32]; @@ -136,22 +123,22 @@ static void test_result_basepoint(void) { unsigned char s_one[32] = { 0 }; s_one[31] = 1; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &point, s_one) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdh(CTX, out_base, &point, s_one, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &point, s_one) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdh(CTX, out_base, &point, s_one, NULL, NULL) == 1); for (i = 0; i < 2 * COUNT; i++) { random_scalar_order(&rand); - rustsecp256k1_v0_9_2_scalar_get_b32(s, &rand); - rustsecp256k1_v0_9_2_scalar_inverse(&rand, &rand); - rustsecp256k1_v0_9_2_scalar_get_b32(s_inv, &rand); + rustsecp256k1_v0_10_0_scalar_get_b32(s, &rand); + rustsecp256k1_v0_10_0_scalar_inverse(&rand, &rand); + rustsecp256k1_v0_10_0_scalar_get_b32(s_inv, &rand); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &point, s) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdh(CTX, out, &point, s_inv, NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(out, out_base, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &point, s) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdh(CTX, out, &point, s_inv, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(out, out_base, 32) == 0); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &point, s_inv) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdh(CTX, out_inv, &point, s, NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(out_inv, out_base, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &point, s_inv) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdh(CTX, out_inv, &point, s, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(out_inv, out_base, 32) == 0); } } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ellswift/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/ellswift/Makefile.am.include index b408e240e..819f26b0e 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ellswift/Makefile.am.include +++ b/secp256k1-sys/depend/secp256k1/src/modules/ellswift/Makefile.am.include @@ -1,4 +1,4 @@ -include_HEADERS += include/rustsecp256k1_v0_9_2_ellswift.h +include_HEADERS += include/rustsecp256k1_v0_10_0_ellswift.h noinst_HEADERS += src/modules/ellswift/bench_impl.h noinst_HEADERS += src/modules/ellswift/main_impl.h noinst_HEADERS += src/modules/ellswift/tests_impl.h diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ellswift/bench_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/ellswift/bench_impl.h index b66d751bd..c90ba194a 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ellswift/bench_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/ellswift/bench_impl.h @@ -9,8 +9,8 @@ #include "../../../include/secp256k1_ellswift.h" typedef struct { - rustsecp256k1_v0_9_2_context *ctx; - rustsecp256k1_v0_9_2_pubkey point[256]; + rustsecp256k1_v0_10_0_context *ctx; + rustsecp256k1_v0_10_0_pubkey point[256]; unsigned char rnd64[64]; } bench_ellswift_data; @@ -30,12 +30,12 @@ static void bench_ellswift_setup(void *arg) { memcpy(data->rnd64, init, 64); for (i = 0; i < 256; ++i) { int j; - CHECK(rustsecp256k1_v0_9_2_ellswift_decode(data->ctx, &data->point[i], data->rnd64)); + CHECK(rustsecp256k1_v0_10_0_ellswift_decode(data->ctx, &data->point[i], data->rnd64)); for (j = 0; j < 64; ++j) { data->rnd64[j] += 1; } } 
- CHECK(rustsecp256k1_v0_9_2_ellswift_encode(data->ctx, data->rnd64, &data->point[255], init + 16)); + CHECK(rustsecp256k1_v0_10_0_ellswift_encode(data->ctx, data->rnd64, &data->point[255], init + 16)); } static void bench_ellswift_encode(void *arg, int iters) { @@ -43,7 +43,7 @@ static void bench_ellswift_encode(void *arg, int iters) { bench_ellswift_data *data = (bench_ellswift_data*)arg; for (i = 0; i < iters; i++) { - CHECK(rustsecp256k1_v0_9_2_ellswift_encode(data->ctx, data->rnd64, &data->point[i & 255], data->rnd64 + 16)); + CHECK(rustsecp256k1_v0_10_0_ellswift_encode(data->ctx, data->rnd64, &data->point[i & 255], data->rnd64 + 16)); } } @@ -53,21 +53,21 @@ static void bench_ellswift_create(void *arg, int iters) { for (i = 0; i < iters; i++) { unsigned char buf[64]; - CHECK(rustsecp256k1_v0_9_2_ellswift_create(data->ctx, buf, data->rnd64, data->rnd64 + 32)); + CHECK(rustsecp256k1_v0_10_0_ellswift_create(data->ctx, buf, data->rnd64, data->rnd64 + 32)); memcpy(data->rnd64, buf, 64); } } static void bench_ellswift_decode(void *arg, int iters) { int i; - rustsecp256k1_v0_9_2_pubkey out; + rustsecp256k1_v0_10_0_pubkey out; size_t len; bench_ellswift_data *data = (bench_ellswift_data*)arg; for (i = 0; i < iters; i++) { - CHECK(rustsecp256k1_v0_9_2_ellswift_decode(data->ctx, &out, data->rnd64) == 1); + CHECK(rustsecp256k1_v0_10_0_ellswift_decode(data->ctx, &out, data->rnd64) == 1); len = 33; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(data->ctx, data->rnd64 + (i % 32), &len, &out, SECP256K1_EC_COMPRESSED)); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(data->ctx, data->rnd64 + (i % 32), &len, &out, SECP256K1_EC_COMPRESSED)); } } @@ -77,13 +77,13 @@ static void bench_ellswift_xdh(void *arg, int iters) { for (i = 0; i < iters; i++) { int party = i & 1; - CHECK(rustsecp256k1_v0_9_2_ellswift_xdh(data->ctx, + CHECK(rustsecp256k1_v0_10_0_ellswift_xdh(data->ctx, data->rnd64 + (i % 33), data->rnd64, data->rnd64, data->rnd64 + ((i + 16) % 33), party, - rustsecp256k1_v0_9_2_ellswift_xdh_hash_function_bip324, + rustsecp256k1_v0_10_0_ellswift_xdh_hash_function_bip324, NULL) == 1); } } @@ -93,14 +93,14 @@ void run_ellswift_bench(int iters, int argc, char **argv) { int d = argc == 1; /* create a context with signing capabilities */ - data.ctx = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE); + data.ctx = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE); if (d || have_flag(argc, argv, "ellswift") || have_flag(argc, argv, "encode") || have_flag(argc, argv, "ellswift_encode")) run_benchmark("ellswift_encode", bench_ellswift_encode, bench_ellswift_setup, NULL, &data, 10, iters); if (d || have_flag(argc, argv, "ellswift") || have_flag(argc, argv, "decode") || have_flag(argc, argv, "ellswift_decode")) run_benchmark("ellswift_decode", bench_ellswift_decode, bench_ellswift_setup, NULL, &data, 10, iters); if (d || have_flag(argc, argv, "ellswift") || have_flag(argc, argv, "keygen") || have_flag(argc, argv, "ellswift_keygen")) run_benchmark("ellswift_keygen", bench_ellswift_create, bench_ellswift_setup, NULL, &data, 10, iters); if (d || have_flag(argc, argv, "ellswift") || have_flag(argc, argv, "ecdh") || have_flag(argc, argv, "ellswift_ecdh")) run_benchmark("ellswift_ecdh", bench_ellswift_xdh, bench_ellswift_setup, NULL, &data, 10, iters); - rustsecp256k1_v0_9_2_context_destroy(data.ctx); + rustsecp256k1_v0_10_0_context_destroy(data.ctx); } #endif diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ellswift/main_impl.h 
b/secp256k1-sys/depend/secp256k1/src/modules/ellswift/main_impl.h index f2fa7765a..ea61fa6df 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ellswift/main_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/ellswift/main_impl.h @@ -12,16 +12,16 @@ #include "../../hash.h" /** c1 = (sqrt(-3)-1)/2 */ -static const rustsecp256k1_v0_9_2_fe rustsecp256k1_v0_9_2_ellswift_c1 = SECP256K1_FE_CONST(0x851695d4, 0x9a83f8ef, 0x919bb861, 0x53cbcb16, 0x630fb68a, 0xed0a766a, 0x3ec693d6, 0x8e6afa40); +static const rustsecp256k1_v0_10_0_fe rustsecp256k1_v0_10_0_ellswift_c1 = SECP256K1_FE_CONST(0x851695d4, 0x9a83f8ef, 0x919bb861, 0x53cbcb16, 0x630fb68a, 0xed0a766a, 0x3ec693d6, 0x8e6afa40); /** c2 = (-sqrt(-3)-1)/2 = -(c1+1) */ -static const rustsecp256k1_v0_9_2_fe rustsecp256k1_v0_9_2_ellswift_c2 = SECP256K1_FE_CONST(0x7ae96a2b, 0x657c0710, 0x6e64479e, 0xac3434e9, 0x9cf04975, 0x12f58995, 0xc1396c28, 0x719501ee); +static const rustsecp256k1_v0_10_0_fe rustsecp256k1_v0_10_0_ellswift_c2 = SECP256K1_FE_CONST(0x7ae96a2b, 0x657c0710, 0x6e64479e, 0xac3434e9, 0x9cf04975, 0x12f58995, 0xc1396c28, 0x719501ee); /** c3 = (-sqrt(-3)+1)/2 = -c1 = c2+1 */ -static const rustsecp256k1_v0_9_2_fe rustsecp256k1_v0_9_2_ellswift_c3 = SECP256K1_FE_CONST(0x7ae96a2b, 0x657c0710, 0x6e64479e, 0xac3434e9, 0x9cf04975, 0x12f58995, 0xc1396c28, 0x719501ef); +static const rustsecp256k1_v0_10_0_fe rustsecp256k1_v0_10_0_ellswift_c3 = SECP256K1_FE_CONST(0x7ae96a2b, 0x657c0710, 0x6e64479e, 0xac3434e9, 0x9cf04975, 0x12f58995, 0xc1396c28, 0x719501ef); /** c4 = (sqrt(-3)+1)/2 = -c2 = c1+1 */ -static const rustsecp256k1_v0_9_2_fe rustsecp256k1_v0_9_2_ellswift_c4 = SECP256K1_FE_CONST(0x851695d4, 0x9a83f8ef, 0x919bb861, 0x53cbcb16, 0x630fb68a, 0xed0a766a, 0x3ec693d6, 0x8e6afa41); +static const rustsecp256k1_v0_10_0_fe rustsecp256k1_v0_10_0_ellswift_c4 = SECP256K1_FE_CONST(0x851695d4, 0x9a83f8ef, 0x919bb861, 0x53cbcb16, 0x630fb68a, 0xed0a766a, 0x3ec693d6, 0x8e6afa41); /** Decode ElligatorSwift encoding (u, t) to a fraction xn/xd representing a curve X coordinate. */ -static void rustsecp256k1_v0_9_2_ellswift_xswiftec_frac_var(rustsecp256k1_v0_9_2_fe *xn, rustsecp256k1_v0_9_2_fe *xd, const rustsecp256k1_v0_9_2_fe *u, const rustsecp256k1_v0_9_2_fe *t) { +static void rustsecp256k1_v0_10_0_ellswift_xswiftec_frac_var(rustsecp256k1_v0_10_0_fe *xn, rustsecp256k1_v0_10_0_fe *xd, const rustsecp256k1_v0_10_0_fe *u, const rustsecp256k1_v0_10_0_fe *t) { /* The implemented algorithm is the following (all operations in GF(p)): * * - Let c0 = sqrt(-3) = 0xa2d2ba93507f1df233770c2a797962cc61f6d15da14ecd47d8d27ae1cd5f852. @@ -83,68 +83,67 @@ static void rustsecp256k1_v0_9_2_ellswift_xswiftec_frac_var(rustsecp256k1_v0_9_2 * - If x2 = u*(c1*s+c2*g)/(g+s) is a valid x coordinate, return it. * - Return x1 = -(x2+u). 
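 *
 * (For background: SwiftEC guarantees that for every valid input at least
 * one of the three candidate X coordinates above lies on the curve, which
 * is why the last candidate is returned unconditionally and only
 * VERIFY_CHECK'ed rather than tested at runtime.)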
*/ - rustsecp256k1_v0_9_2_fe u1, s, g, p, d, n, l; + rustsecp256k1_v0_10_0_fe u1, s, g, p, d, n, l; u1 = *u; - if (EXPECT(rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&u1), 0)) u1 = rustsecp256k1_v0_9_2_fe_one; - rustsecp256k1_v0_9_2_fe_sqr(&s, t); - if (EXPECT(rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(t), 0)) s = rustsecp256k1_v0_9_2_fe_one; - rustsecp256k1_v0_9_2_fe_sqr(&l, &u1); /* l = u^2 */ - rustsecp256k1_v0_9_2_fe_mul(&g, &l, &u1); /* g = u^3 */ - rustsecp256k1_v0_9_2_fe_add_int(&g, SECP256K1_B); /* g = u^3 + 7 */ + if (EXPECT(rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&u1), 0)) u1 = rustsecp256k1_v0_10_0_fe_one; + rustsecp256k1_v0_10_0_fe_sqr(&s, t); + if (EXPECT(rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(t), 0)) s = rustsecp256k1_v0_10_0_fe_one; + rustsecp256k1_v0_10_0_fe_sqr(&l, &u1); /* l = u^2 */ + rustsecp256k1_v0_10_0_fe_mul(&g, &l, &u1); /* g = u^3 */ + rustsecp256k1_v0_10_0_fe_add_int(&g, SECP256K1_B); /* g = u^3 + 7 */ p = g; /* p = g */ - rustsecp256k1_v0_9_2_fe_add(&p, &s); /* p = g+s */ - if (EXPECT(rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&p), 0)) { - rustsecp256k1_v0_9_2_fe_mul_int(&s, 4); + rustsecp256k1_v0_10_0_fe_add(&p, &s); /* p = g+s */ + if (EXPECT(rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&p), 0)) { + rustsecp256k1_v0_10_0_fe_mul_int(&s, 4); /* Recompute p = g+s */ p = g; /* p = g */ - rustsecp256k1_v0_9_2_fe_add(&p, &s); /* p = g+s */ + rustsecp256k1_v0_10_0_fe_add(&p, &s); /* p = g+s */ } - rustsecp256k1_v0_9_2_fe_mul(&d, &s, &l); /* d = s*u^2 */ - rustsecp256k1_v0_9_2_fe_mul_int(&d, 3); /* d = 3*s*u^2 */ - rustsecp256k1_v0_9_2_fe_sqr(&l, &p); /* l = (g+s)^2 */ - rustsecp256k1_v0_9_2_fe_negate(&l, &l, 1); /* l = -(g+s)^2 */ - rustsecp256k1_v0_9_2_fe_mul(&n, &d, &u1); /* n = 3*s*u^3 */ - rustsecp256k1_v0_9_2_fe_add(&n, &l); /* n = 3*s*u^3-(g+s)^2 */ - if (rustsecp256k1_v0_9_2_ge_x_frac_on_curve_var(&n, &d)) { + rustsecp256k1_v0_10_0_fe_mul(&d, &s, &l); /* d = s*u^2 */ + rustsecp256k1_v0_10_0_fe_mul_int(&d, 3); /* d = 3*s*u^2 */ + rustsecp256k1_v0_10_0_fe_sqr(&l, &p); /* l = (g+s)^2 */ + rustsecp256k1_v0_10_0_fe_negate(&l, &l, 1); /* l = -(g+s)^2 */ + rustsecp256k1_v0_10_0_fe_mul(&n, &d, &u1); /* n = 3*s*u^3 */ + rustsecp256k1_v0_10_0_fe_add(&n, &l); /* n = 3*s*u^3-(g+s)^2 */ + if (rustsecp256k1_v0_10_0_ge_x_frac_on_curve_var(&n, &d)) { /* Return x3 = n/d = (3*s*u^3-(g+s)^2)/(3*s*u^2) */ *xn = n; *xd = d; return; } *xd = p; - rustsecp256k1_v0_9_2_fe_mul(&l, &rustsecp256k1_v0_9_2_ellswift_c1, &s); /* l = c1*s */ - rustsecp256k1_v0_9_2_fe_mul(&n, &rustsecp256k1_v0_9_2_ellswift_c2, &g); /* n = c2*g */ - rustsecp256k1_v0_9_2_fe_add(&n, &l); /* n = c1*s+c2*g */ - rustsecp256k1_v0_9_2_fe_mul(&n, &n, &u1); /* n = u*(c1*s+c2*g) */ + rustsecp256k1_v0_10_0_fe_mul(&l, &rustsecp256k1_v0_10_0_ellswift_c1, &s); /* l = c1*s */ + rustsecp256k1_v0_10_0_fe_mul(&n, &rustsecp256k1_v0_10_0_ellswift_c2, &g); /* n = c2*g */ + rustsecp256k1_v0_10_0_fe_add(&n, &l); /* n = c1*s+c2*g */ + rustsecp256k1_v0_10_0_fe_mul(&n, &n, &u1); /* n = u*(c1*s+c2*g) */ /* Possible optimization: in the invocation below, p^2 = (g+s)^2 is computed, * which we already have computed above. This could be deduplicated. 
*/ - if (rustsecp256k1_v0_9_2_ge_x_frac_on_curve_var(&n, &p)) { + if (rustsecp256k1_v0_10_0_ge_x_frac_on_curve_var(&n, &p)) { /* Return x2 = n/p = u*(c1*s+c2*g)/(g+s) */ *xn = n; return; } - rustsecp256k1_v0_9_2_fe_mul(&l, &p, &u1); /* l = u*(g+s) */ - rustsecp256k1_v0_9_2_fe_add(&n, &l); /* n = u*(c1*s+c2*g)+u*(g+s) */ - rustsecp256k1_v0_9_2_fe_negate(xn, &n, 2); /* n = -u*(c1*s+c2*g)-u*(g+s) */ -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_ge_x_frac_on_curve_var(xn, &p)); -#endif + rustsecp256k1_v0_10_0_fe_mul(&l, &p, &u1); /* l = u*(g+s) */ + rustsecp256k1_v0_10_0_fe_add(&n, &l); /* n = u*(c1*s+c2*g)+u*(g+s) */ + rustsecp256k1_v0_10_0_fe_negate(xn, &n, 2); /* n = -u*(c1*s+c2*g)-u*(g+s) */ + + VERIFY_CHECK(rustsecp256k1_v0_10_0_ge_x_frac_on_curve_var(xn, &p)); /* Return x3 = n/p = -(u*(c1*s+c2*g)/(g+s)+u) */ } /** Decode ElligatorSwift encoding (u, t) to X coordinate. */ -static void rustsecp256k1_v0_9_2_ellswift_xswiftec_var(rustsecp256k1_v0_9_2_fe *x, const rustsecp256k1_v0_9_2_fe *u, const rustsecp256k1_v0_9_2_fe *t) { - rustsecp256k1_v0_9_2_fe xn, xd; - rustsecp256k1_v0_9_2_ellswift_xswiftec_frac_var(&xn, &xd, u, t); - rustsecp256k1_v0_9_2_fe_inv_var(&xd, &xd); - rustsecp256k1_v0_9_2_fe_mul(x, &xn, &xd); +static void rustsecp256k1_v0_10_0_ellswift_xswiftec_var(rustsecp256k1_v0_10_0_fe *x, const rustsecp256k1_v0_10_0_fe *u, const rustsecp256k1_v0_10_0_fe *t) { + rustsecp256k1_v0_10_0_fe xn, xd; + rustsecp256k1_v0_10_0_ellswift_xswiftec_frac_var(&xn, &xd, u, t); + rustsecp256k1_v0_10_0_fe_inv_var(&xd, &xd); + rustsecp256k1_v0_10_0_fe_mul(x, &xn, &xd); } /** Decode ElligatorSwift encoding (u, t) to point P. */ -static void rustsecp256k1_v0_9_2_ellswift_swiftec_var(rustsecp256k1_v0_9_2_ge *p, const rustsecp256k1_v0_9_2_fe *u, const rustsecp256k1_v0_9_2_fe *t) { - rustsecp256k1_v0_9_2_fe x; - rustsecp256k1_v0_9_2_ellswift_xswiftec_var(&x, u, t); - rustsecp256k1_v0_9_2_ge_set_xo_var(p, &x, rustsecp256k1_v0_9_2_fe_is_odd(t)); +static void rustsecp256k1_v0_10_0_ellswift_swiftec_var(rustsecp256k1_v0_10_0_ge *p, const rustsecp256k1_v0_10_0_fe *u, const rustsecp256k1_v0_10_0_fe *t) { + rustsecp256k1_v0_10_0_fe x; + rustsecp256k1_v0_10_0_ellswift_xswiftec_var(&x, u, t); + rustsecp256k1_v0_10_0_ge_set_xo_var(p, &x, rustsecp256k1_v0_10_0_fe_is_odd(t)); } /* Try to complete an ElligatorSwift encoding (u, t) for X coordinate x, given u and x. @@ -154,7 +153,7 @@ static void rustsecp256k1_v0_9_2_ellswift_swiftec_var(rustsecp256k1_v0_9_2_ge *p * distinct input argument c (in range 0-7), and some or all of these may return failure. * The following guarantees exist: * - Given (x, u), no two distinct c values give the same successful result t. - * - Every successful result maps back to x through rustsecp256k1_v0_9_2_ellswift_xswiftec_var. + * - Every successful result maps back to x through rustsecp256k1_v0_10_0_ellswift_xswiftec_var. * - Given (x, u), all t values that map back to x can be reached by combining the * successful results from this function over all c values, with the exception of: * - this function cannot be called with u=0 @@ -166,7 +165,7 @@ static void rustsecp256k1_v0_9_2_ellswift_swiftec_var(rustsecp256k1_v0_9_2_ge *p * encoding more closely: c=0 through c=3 match branches 1..4 in the paper, while c=4 through * c=7 are copies of those with an additional negation of sqrt(w). 
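 *
 * (The encoder below exploits these guarantees: it draws a uniformly
 * random u and branch value c and simply retries on failure. Since
 * distinct c values never produce the same t, and together they reach
 * essentially all preimages, this rejection sampling yields a uniformly
 * distributed encoding.)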
*/ -static int rustsecp256k1_v0_9_2_ellswift_xswiftec_inv_var(rustsecp256k1_v0_9_2_fe *t, const rustsecp256k1_v0_9_2_fe *x_in, const rustsecp256k1_v0_9_2_fe *u_in, int c) { +static int rustsecp256k1_v0_10_0_ellswift_xswiftec_inv_var(rustsecp256k1_v0_10_0_fe *t, const rustsecp256k1_v0_10_0_fe *x_in, const rustsecp256k1_v0_10_0_fe *u_in, int c) { /* The implemented algorithm is this (all arithmetic, except involving c, is mod p): * * - If (c & 2) = 0: @@ -187,16 +186,14 @@ static int rustsecp256k1_v0_9_2_ellswift_xswiftec_inv_var(rustsecp256k1_v0_9_2_f * - If (c & 5) = 4: return w*(c3*u + v). * - If (c & 5) = 5: return -w*(c4*u + v). */ - rustsecp256k1_v0_9_2_fe x = *x_in, u = *u_in, g, v, s, m, r, q; + rustsecp256k1_v0_10_0_fe x = *x_in, u = *u_in, g, v, s, m, r, q; int ret; - rustsecp256k1_v0_9_2_fe_normalize_weak(&x); - rustsecp256k1_v0_9_2_fe_normalize_weak(&u); + rustsecp256k1_v0_10_0_fe_normalize_weak(&x); + rustsecp256k1_v0_10_0_fe_normalize_weak(&u); -#ifdef VERIFY VERIFY_CHECK(c >= 0 && c < 8); - VERIFY_CHECK(rustsecp256k1_v0_9_2_ge_x_on_curve_var(&x)); -#endif + VERIFY_CHECK(rustsecp256k1_v0_10_0_ge_x_on_curve_var(&x)); if (!(c & 2)) { /* c is in {0, 1, 4, 5}. In this case we look for an inverse under the x1 (if c=0 or @@ -206,16 +203,16 @@ static int rustsecp256k1_v0_9_2_ellswift_xswiftec_inv_var(rustsecp256k1_v0_9_2_f * back under the x3 formula instead (which has priority over x1 and x2, so the decoding * would not match x). */ m = x; /* m = x */ - rustsecp256k1_v0_9_2_fe_add(&m, &u); /* m = u+x */ - rustsecp256k1_v0_9_2_fe_negate(&m, &m, 2); /* m = -u-x */ + rustsecp256k1_v0_10_0_fe_add(&m, &u); /* m = u+x */ + rustsecp256k1_v0_10_0_fe_negate(&m, &m, 2); /* m = -u-x */ /* Test if (-u-x) is a valid X coordinate. If so, fail. */ - if (rustsecp256k1_v0_9_2_ge_x_on_curve_var(&m)) return 0; + if (rustsecp256k1_v0_10_0_ge_x_on_curve_var(&m)) return 0; /* Let s = -(u^3 + 7)/(u^2 + u*x + x^2) [first part] */ - rustsecp256k1_v0_9_2_fe_sqr(&s, &m); /* s = (u+x)^2 */ - rustsecp256k1_v0_9_2_fe_negate(&s, &s, 1); /* s = -(u+x)^2 */ - rustsecp256k1_v0_9_2_fe_mul(&m, &u, &x); /* m = u*x */ - rustsecp256k1_v0_9_2_fe_add(&s, &m); /* s = -(u^2 + u*x + x^2) */ + rustsecp256k1_v0_10_0_fe_sqr(&s, &m); /* s = (u+x)^2 */ + rustsecp256k1_v0_10_0_fe_negate(&s, &s, 1); /* s = -(u+x)^2 */ + rustsecp256k1_v0_10_0_fe_mul(&m, &u, &x); /* m = u*x */ + rustsecp256k1_v0_10_0_fe_add(&s, &m); /* s = -(u^2 + u*x + x^2) */ /* Note that at this point, s = 0 is impossible. If it were the case: * s = -(u^2 + u*x + x^2) = 0 @@ -227,25 +224,23 @@ static int rustsecp256k1_v0_9_2_ellswift_xswiftec_inv_var(rustsecp256k1_v0_9_2_f * => x^3 + B = (-u - x)^3 + B * * However, we know x^3 + B is square (because x is on the curve) and - * that (-u-x)^3 + B is not square (the rustsecp256k1_v0_9_2_ge_x_on_curve_var(&m) + * that (-u-x)^3 + B is not square (the rustsecp256k1_v0_10_0_ge_x_on_curve_var(&m) * test above would have failed). This is a contradiction, and thus the * assumption s=0 is false. */ -#ifdef VERIFY - VERIFY_CHECK(!rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&s)); -#endif + VERIFY_CHECK(!rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&s)); /* If s is not square, fail. We have not fully computed s yet, but s is square iff * -(u^3+7)*(u^2+u*x+x^2) is square (because a/b is square iff a*b is square and b is * nonzero). 
*/ - rustsecp256k1_v0_9_2_fe_sqr(&g, &u); /* g = u^2 */ - rustsecp256k1_v0_9_2_fe_mul(&g, &g, &u); /* g = u^3 */ - rustsecp256k1_v0_9_2_fe_add_int(&g, SECP256K1_B); /* g = u^3+7 */ - rustsecp256k1_v0_9_2_fe_mul(&m, &s, &g); /* m = -(u^3 + 7)*(u^2 + u*x + x^2) */ - if (!rustsecp256k1_v0_9_2_fe_is_square_var(&m)) return 0; + rustsecp256k1_v0_10_0_fe_sqr(&g, &u); /* g = u^2 */ + rustsecp256k1_v0_10_0_fe_mul(&g, &g, &u); /* g = u^3 */ + rustsecp256k1_v0_10_0_fe_add_int(&g, SECP256K1_B); /* g = u^3+7 */ + rustsecp256k1_v0_10_0_fe_mul(&m, &s, &g); /* m = -(u^3 + 7)*(u^2 + u*x + x^2) */ + if (!rustsecp256k1_v0_10_0_fe_is_square_var(&m)) return 0; /* Let s = -(u^3 + 7)/(u^2 + u*x + x^2) [second part] */ - rustsecp256k1_v0_9_2_fe_inv_var(&s, &s); /* s = -1/(u^2 + u*x + x^2) [no div by 0] */ - rustsecp256k1_v0_9_2_fe_mul(&s, &s, &g); /* s = -(u^3 + 7)/(u^2 + u*x + x^2) */ + rustsecp256k1_v0_10_0_fe_inv_var(&s, &s); /* s = -1/(u^2 + u*x + x^2) [no div by 0] */ + rustsecp256k1_v0_10_0_fe_mul(&s, &s, &g); /* s = -(u^3 + 7)/(u^2 + u*x + x^2) */ /* Let v = x. */ v = x; @@ -253,53 +248,57 @@ static int rustsecp256k1_v0_9_2_ellswift_xswiftec_inv_var(rustsecp256k1_v0_9_2_f /* c is in {2, 3, 6, 7}. In this case we look for an inverse under the x3 formula. */ /* Let s = x-u. */ - rustsecp256k1_v0_9_2_fe_negate(&m, &u, 1); /* m = -u */ + rustsecp256k1_v0_10_0_fe_negate(&m, &u, 1); /* m = -u */ s = m; /* s = -u */ - rustsecp256k1_v0_9_2_fe_add(&s, &x); /* s = x-u */ + rustsecp256k1_v0_10_0_fe_add(&s, &x); /* s = x-u */ /* If s is not square, fail. */ - if (!rustsecp256k1_v0_9_2_fe_is_square_var(&s)) return 0; + if (!rustsecp256k1_v0_10_0_fe_is_square_var(&s)) return 0; /* Let r = sqrt(-s*(4*(u^3+7)+3*u^2*s)); fail if it doesn't exist. */ - rustsecp256k1_v0_9_2_fe_sqr(&g, &u); /* g = u^2 */ - rustsecp256k1_v0_9_2_fe_mul(&q, &s, &g); /* q = s*u^2 */ - rustsecp256k1_v0_9_2_fe_mul_int(&q, 3); /* q = 3*s*u^2 */ - rustsecp256k1_v0_9_2_fe_mul(&g, &g, &u); /* g = u^3 */ - rustsecp256k1_v0_9_2_fe_mul_int(&g, 4); /* g = 4*u^3 */ - rustsecp256k1_v0_9_2_fe_add_int(&g, 4 * SECP256K1_B); /* g = 4*(u^3+7) */ - rustsecp256k1_v0_9_2_fe_add(&q, &g); /* q = 4*(u^3+7)+3*s*u^2 */ - rustsecp256k1_v0_9_2_fe_mul(&q, &q, &s); /* q = s*(4*(u^3+7)+3*u^2*s) */ - rustsecp256k1_v0_9_2_fe_negate(&q, &q, 1); /* q = -s*(4*(u^3+7)+3*u^2*s) */ - if (!rustsecp256k1_v0_9_2_fe_is_square_var(&q)) return 0; - ret = rustsecp256k1_v0_9_2_fe_sqrt(&r, &q); /* r = sqrt(-s*(4*(u^3+7)+3*u^2*s)) */ + rustsecp256k1_v0_10_0_fe_sqr(&g, &u); /* g = u^2 */ + rustsecp256k1_v0_10_0_fe_mul(&q, &s, &g); /* q = s*u^2 */ + rustsecp256k1_v0_10_0_fe_mul_int(&q, 3); /* q = 3*s*u^2 */ + rustsecp256k1_v0_10_0_fe_mul(&g, &g, &u); /* g = u^3 */ + rustsecp256k1_v0_10_0_fe_mul_int(&g, 4); /* g = 4*u^3 */ + rustsecp256k1_v0_10_0_fe_add_int(&g, 4 * SECP256K1_B); /* g = 4*(u^3+7) */ + rustsecp256k1_v0_10_0_fe_add(&q, &g); /* q = 4*(u^3+7)+3*s*u^2 */ + rustsecp256k1_v0_10_0_fe_mul(&q, &q, &s); /* q = s*(4*(u^3+7)+3*u^2*s) */ + rustsecp256k1_v0_10_0_fe_negate(&q, &q, 1); /* q = -s*(4*(u^3+7)+3*u^2*s) */ + if (!rustsecp256k1_v0_10_0_fe_is_square_var(&q)) return 0; + ret = rustsecp256k1_v0_10_0_fe_sqrt(&r, &q); /* r = sqrt(-s*(4*(u^3+7)+3*u^2*s)) */ +#ifdef VERIFY VERIFY_CHECK(ret); +#else + (void)ret; +#endif /* If (c & 1) = 1 and r = 0, fail. */ - if (EXPECT((c & 1) && rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&r), 0)) return 0; + if (EXPECT((c & 1) && rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&r), 0)) return 0; /* If s = 0, fail. 
*/ - if (EXPECT(rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&s), 0)) return 0; + if (EXPECT(rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&s), 0)) return 0; /* Let v = (r/s-u)/2. */ - rustsecp256k1_v0_9_2_fe_inv_var(&v, &s); /* v = 1/s [no div by 0] */ - rustsecp256k1_v0_9_2_fe_mul(&v, &v, &r); /* v = r/s */ - rustsecp256k1_v0_9_2_fe_add(&v, &m); /* v = r/s-u */ - rustsecp256k1_v0_9_2_fe_half(&v); /* v = (r/s-u)/2 */ + rustsecp256k1_v0_10_0_fe_inv_var(&v, &s); /* v = 1/s [no div by 0] */ + rustsecp256k1_v0_10_0_fe_mul(&v, &v, &r); /* v = r/s */ + rustsecp256k1_v0_10_0_fe_add(&v, &m); /* v = r/s-u */ + rustsecp256k1_v0_10_0_fe_half(&v); /* v = (r/s-u)/2 */ } /* Let w = sqrt(s). */ - ret = rustsecp256k1_v0_9_2_fe_sqrt(&m, &s); /* m = sqrt(s) = w */ + ret = rustsecp256k1_v0_10_0_fe_sqrt(&m, &s); /* m = sqrt(s) = w */ VERIFY_CHECK(ret); /* Return logic. */ if ((c & 5) == 0 || (c & 5) == 5) { - rustsecp256k1_v0_9_2_fe_negate(&m, &m, 1); /* m = -w */ + rustsecp256k1_v0_10_0_fe_negate(&m, &m, 1); /* m = -w */ } /* Now m = {-w if c&5=0 or c&5=5; w otherwise}. */ - rustsecp256k1_v0_9_2_fe_mul(&u, &u, c&1 ? &rustsecp256k1_v0_9_2_ellswift_c4 : &rustsecp256k1_v0_9_2_ellswift_c3); + rustsecp256k1_v0_10_0_fe_mul(&u, &u, c&1 ? &rustsecp256k1_v0_10_0_ellswift_c4 : &rustsecp256k1_v0_10_0_ellswift_c3); /* u = {c4 if c&1=1; c3 otherwise}*u */ - rustsecp256k1_v0_9_2_fe_add(&u, &v); /* u = {c4 if c&1=1; c3 otherwise}*u + v */ - rustsecp256k1_v0_9_2_fe_mul(t, &m, &u); + rustsecp256k1_v0_10_0_fe_add(&u, &v); /* u = {c4 if c&1=1; c3 otherwise}*u + v */ + rustsecp256k1_v0_10_0_fe_mul(t, &m, &u); return 1; } @@ -308,8 +307,8 @@ static int rustsecp256k1_v0_9_2_ellswift_xswiftec_inv_var(rustsecp256k1_v0_9_2_f * hasher is a SHA256 object to which an incrementing 4-byte counter is written to generate randomness. * Writing 13 bytes (4 bytes for counter, plus 9 bytes for the SHA256 padding) cannot cross a * 64-byte block size boundary (to make sure it only triggers a single SHA256 compression). */ -static void rustsecp256k1_v0_9_2_ellswift_prng(unsigned char* out32, const rustsecp256k1_v0_9_2_sha256 *hasher, uint32_t cnt) { - rustsecp256k1_v0_9_2_sha256 hash = *hasher; +static void rustsecp256k1_v0_10_0_ellswift_prng(unsigned char* out32, const rustsecp256k1_v0_10_0_sha256 *hasher, uint32_t cnt) { + rustsecp256k1_v0_10_0_sha256 hash = *hasher; unsigned char buf4[4]; #ifdef VERIFY size_t blocks = hash.bytes >> 6; @@ -318,12 +317,11 @@ static void rustsecp256k1_v0_9_2_ellswift_prng(unsigned char* out32, const rusts buf4[1] = cnt >> 8; buf4[2] = cnt >> 16; buf4[3] = cnt >> 24; - rustsecp256k1_v0_9_2_sha256_write(&hash, buf4, 4); - rustsecp256k1_v0_9_2_sha256_finalize(&hash, out32); -#ifdef VERIFY + rustsecp256k1_v0_10_0_sha256_write(&hash, buf4, 4); + rustsecp256k1_v0_10_0_sha256_finalize(&hash, out32); + /* Writing and finalizing together should trigger exactly one SHA256 compression. */ VERIFY_CHECK(((hash.bytes) >> 6) == (blocks + 1)); -#endif } /** Find an ElligatorSwift encoding (u, t) for X coordinate x, and random Y coordinate. @@ -331,8 +329,8 @@ static void rustsecp256k1_v0_9_2_ellswift_prng(unsigned char* out32, const rusts * u32 is the 32-byte big endian encoding of u; t is the output field element t that still * needs encoding. * - * hasher is a hasher in the rustsecp256k1_v0_9_2_ellswift_prng sense, with the same restrictions. 
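 *
 * (Concretely, those restrictions hold here because the tagged-hash
 * midstates used as hashers leave hash->bytes at a multiple of 64, so the
 * 4-byte counter plus SHA256's minimum 9 bytes of padding (13 bytes in
 * all) always fit within a single 64-byte block. Each PRNG output thus
 * costs exactly one compression, as the VERIFY_CHECK in
 * rustsecp256k1_v0_10_0_ellswift_prng asserts.)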
*/ -static void rustsecp256k1_v0_9_2_ellswift_xelligatorswift_var(unsigned char *u32, rustsecp256k1_v0_9_2_fe *t, const rustsecp256k1_v0_9_2_fe *x, const rustsecp256k1_v0_9_2_sha256 *hasher) { + * hasher is a hasher in the rustsecp256k1_v0_10_0_ellswift_prng sense, with the same restrictions. */ +static void rustsecp256k1_v0_10_0_ellswift_xelligatorswift_var(unsigned char *u32, rustsecp256k1_v0_10_0_fe *t, const rustsecp256k1_v0_10_0_fe *x, const rustsecp256k1_v0_10_0_sha256 *hasher) { /* Pool of 3-bit branch values. */ unsigned char branch_hash[32]; /* Number of 3-bit values in branch_hash left. */ @@ -345,48 +343,47 @@ static void rustsecp256k1_v0_9_2_ellswift_xelligatorswift_var(unsigned char *u32 uint32_t cnt = 0; while (1) { int branch; - rustsecp256k1_v0_9_2_fe u; + rustsecp256k1_v0_10_0_fe u; /* If the pool of branch values is empty, populate it. */ if (branches_left == 0) { - rustsecp256k1_v0_9_2_ellswift_prng(branch_hash, hasher, cnt++); + rustsecp256k1_v0_10_0_ellswift_prng(branch_hash, hasher, cnt++); branches_left = 64; } /* Take a 3-bit branch value from the branch pool (top bit is discarded). */ --branches_left; branch = (branch_hash[branches_left >> 1] >> ((branches_left & 1) << 2)) & 7; /* Compute a new u value by hashing. */ - rustsecp256k1_v0_9_2_ellswift_prng(u32, hasher, cnt++); + rustsecp256k1_v0_10_0_ellswift_prng(u32, hasher, cnt++); /* overflow is not a problem (we prefer uniform u32 over uniform u). */ - rustsecp256k1_v0_9_2_fe_set_b32_mod(&u, u32); + rustsecp256k1_v0_10_0_fe_set_b32_mod(&u, u32); /* Since u is the output of a hash, it should practically never be 0. We could apply the * u=0 to u=1 correction here too to deal with that case still, but it's such a low * probability event that we do not bother. */ -#ifdef VERIFY - VERIFY_CHECK(!rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&u)); -#endif + VERIFY_CHECK(!rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&u)); + /* Find a remainder t, and return it if found. */ - if (EXPECT(rustsecp256k1_v0_9_2_ellswift_xswiftec_inv_var(t, x, &u, branch), 0)) break; + if (EXPECT(rustsecp256k1_v0_10_0_ellswift_xswiftec_inv_var(t, x, &u, branch), 0)) break; } } /** Find an ElligatorSwift encoding (u, t) for point P. * - * This is similar rustsecp256k1_v0_9_2_ellswift_xelligatorswift_var, except it takes a full group element p + * This is similar rustsecp256k1_v0_10_0_ellswift_xelligatorswift_var, except it takes a full group element p * as input, and returns an encoding that matches the provided Y coordinate rather than a random * one. 
*/ -static void rustsecp256k1_v0_9_2_ellswift_elligatorswift_var(unsigned char *u32, rustsecp256k1_v0_9_2_fe *t, const rustsecp256k1_v0_9_2_ge *p, const rustsecp256k1_v0_9_2_sha256 *hasher) { - rustsecp256k1_v0_9_2_ellswift_xelligatorswift_var(u32, t, &p->x, hasher); - rustsecp256k1_v0_9_2_fe_normalize_var(t); - if (rustsecp256k1_v0_9_2_fe_is_odd(t) != rustsecp256k1_v0_9_2_fe_is_odd(&p->y)) { - rustsecp256k1_v0_9_2_fe_negate(t, t, 1); - rustsecp256k1_v0_9_2_fe_normalize_var(t); +static void rustsecp256k1_v0_10_0_ellswift_elligatorswift_var(unsigned char *u32, rustsecp256k1_v0_10_0_fe *t, const rustsecp256k1_v0_10_0_ge *p, const rustsecp256k1_v0_10_0_sha256 *hasher) { + rustsecp256k1_v0_10_0_ellswift_xelligatorswift_var(u32, t, &p->x, hasher); + rustsecp256k1_v0_10_0_fe_normalize_var(t); + if (rustsecp256k1_v0_10_0_fe_is_odd(t) != rustsecp256k1_v0_10_0_fe_is_odd(&p->y)) { + rustsecp256k1_v0_10_0_fe_negate(t, t, 1); + rustsecp256k1_v0_10_0_fe_normalize_var(t); } } -/** Set hash state to the BIP340 tagged hash midstate for "rustsecp256k1_v0_9_2_ellswift_encode". */ -static void rustsecp256k1_v0_9_2_ellswift_sha256_init_encode(rustsecp256k1_v0_9_2_sha256* hash) { - rustsecp256k1_v0_9_2_sha256_initialize(hash); +/** Set hash state to the BIP340 tagged hash midstate for "rustsecp256k1_v0_10_0_ellswift_encode". */ +static void rustsecp256k1_v0_10_0_ellswift_sha256_init_encode(rustsecp256k1_v0_10_0_sha256* hash) { + rustsecp256k1_v0_10_0_sha256_initialize(hash); hash->s[0] = 0xd1a6524bul; hash->s[1] = 0x028594b3ul; hash->s[2] = 0x96e42f4eul; @@ -399,31 +396,35 @@ static void rustsecp256k1_v0_9_2_ellswift_sha256_init_encode(rustsecp256k1_v0_9_ hash->bytes = 64; } -int rustsecp256k1_v0_9_2_ellswift_encode(const rustsecp256k1_v0_9_2_context *ctx, unsigned char *ell64, const rustsecp256k1_v0_9_2_pubkey *pubkey, const unsigned char *rnd32) { - rustsecp256k1_v0_9_2_ge p; +int rustsecp256k1_v0_10_0_ellswift_encode(const rustsecp256k1_v0_10_0_context *ctx, unsigned char *ell64, const rustsecp256k1_v0_10_0_pubkey *pubkey, const unsigned char *rnd32) { + rustsecp256k1_v0_10_0_ge p; VERIFY_CHECK(ctx != NULL); ARG_CHECK(ell64 != NULL); ARG_CHECK(pubkey != NULL); ARG_CHECK(rnd32 != NULL); - if (rustsecp256k1_v0_9_2_pubkey_load(ctx, &p, pubkey)) { - rustsecp256k1_v0_9_2_fe t; + if (rustsecp256k1_v0_10_0_pubkey_load(ctx, &p, pubkey)) { + rustsecp256k1_v0_10_0_fe t; unsigned char p64[64] = {0}; size_t ser_size; int ser_ret; - rustsecp256k1_v0_9_2_sha256 hash; + rustsecp256k1_v0_10_0_sha256 hash; /* Set up hasher state; the used RNG is H(pubkey || "\x00"*31 || rnd32 || cnt++), using - * BIP340 tagged hash with tag "rustsecp256k1_v0_9_2_ellswift_encode". */ - rustsecp256k1_v0_9_2_ellswift_sha256_init_encode(&hash); - ser_ret = rustsecp256k1_v0_9_2_eckey_pubkey_serialize(&p, p64, &ser_size, 1); + * BIP340 tagged hash with tag "rustsecp256k1_v0_10_0_ellswift_encode". */ + rustsecp256k1_v0_10_0_ellswift_sha256_init_encode(&hash); + ser_ret = rustsecp256k1_v0_10_0_eckey_pubkey_serialize(&p, p64, &ser_size, 1); +#ifdef VERIFY VERIFY_CHECK(ser_ret && ser_size == 33); - rustsecp256k1_v0_9_2_sha256_write(&hash, p64, sizeof(p64)); - rustsecp256k1_v0_9_2_sha256_write(&hash, rnd32, 32); +#else + (void)ser_ret; +#endif + rustsecp256k1_v0_10_0_sha256_write(&hash, p64, sizeof(p64)); + rustsecp256k1_v0_10_0_sha256_write(&hash, rnd32, 32); /* Compute ElligatorSwift encoding and construct output. 
*/ - rustsecp256k1_v0_9_2_ellswift_elligatorswift_var(ell64, &t, &p, &hash); /* puts u in ell64[0..32] */ - rustsecp256k1_v0_9_2_fe_get_b32(ell64 + 32, &t); /* puts t in ell64[32..64] */ + rustsecp256k1_v0_10_0_ellswift_elligatorswift_var(ell64, &t, &p, &hash); /* puts u in ell64[0..32] */ + rustsecp256k1_v0_10_0_fe_get_b32(ell64 + 32, &t); /* puts t in ell64[32..64] */ return 1; } /* Only reached in case the provided pubkey is invalid. */ @@ -431,9 +432,9 @@ int rustsecp256k1_v0_9_2_ellswift_encode(const rustsecp256k1_v0_9_2_context *ctx return 0; } -/** Set hash state to the BIP340 tagged hash midstate for "rustsecp256k1_v0_9_2_ellswift_create". */ -static void rustsecp256k1_v0_9_2_ellswift_sha256_init_create(rustsecp256k1_v0_9_2_sha256* hash) { - rustsecp256k1_v0_9_2_sha256_initialize(hash); +/** Set hash state to the BIP340 tagged hash midstate for "rustsecp256k1_v0_10_0_ellswift_create". */ +static void rustsecp256k1_v0_10_0_ellswift_sha256_init_create(rustsecp256k1_v0_10_0_sha256* hash) { + rustsecp256k1_v0_10_0_sha256_initialize(hash); hash->s[0] = 0xd29e1bf5ul; hash->s[1] = 0xf7025f42ul; hash->s[2] = 0x9b024773ul; @@ -446,11 +447,11 @@ static void rustsecp256k1_v0_9_2_ellswift_sha256_init_create(rustsecp256k1_v0_9_ hash->bytes = 64; } -int rustsecp256k1_v0_9_2_ellswift_create(const rustsecp256k1_v0_9_2_context *ctx, unsigned char *ell64, const unsigned char *seckey32, const unsigned char *auxrnd32) { - rustsecp256k1_v0_9_2_ge p; - rustsecp256k1_v0_9_2_fe t; - rustsecp256k1_v0_9_2_sha256 hash; - rustsecp256k1_v0_9_2_scalar seckey_scalar; +int rustsecp256k1_v0_10_0_ellswift_create(const rustsecp256k1_v0_10_0_context *ctx, unsigned char *ell64, const unsigned char *seckey32, const unsigned char *auxrnd32) { + rustsecp256k1_v0_10_0_ge p; + rustsecp256k1_v0_10_0_fe t; + rustsecp256k1_v0_10_0_sha256 hash; + rustsecp256k1_v0_10_0_scalar seckey_scalar; int ret; static const unsigned char zero32[32] = {0}; @@ -458,64 +459,64 @@ int rustsecp256k1_v0_9_2_ellswift_create(const rustsecp256k1_v0_9_2_context *ctx VERIFY_CHECK(ctx != NULL); ARG_CHECK(ell64 != NULL); memset(ell64, 0, 64); - ARG_CHECK(rustsecp256k1_v0_9_2_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_10_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(seckey32 != NULL); /* Compute (affine) public key */ - ret = rustsecp256k1_v0_9_2_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey32); - rustsecp256k1_v0_9_2_declassify(ctx, &p, sizeof(p)); /* not constant time in produced pubkey */ - rustsecp256k1_v0_9_2_fe_normalize_var(&p.x); - rustsecp256k1_v0_9_2_fe_normalize_var(&p.y); + ret = rustsecp256k1_v0_10_0_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey32); + rustsecp256k1_v0_10_0_declassify(ctx, &p, sizeof(p)); /* not constant time in produced pubkey */ + rustsecp256k1_v0_10_0_fe_normalize_var(&p.x); + rustsecp256k1_v0_10_0_fe_normalize_var(&p.y); /* Set up hasher state. The used RNG is H(privkey || "\x00"*32 [|| auxrnd32] || cnt++), - * using BIP340 tagged hash with tag "rustsecp256k1_v0_9_2_ellswift_create". */ - rustsecp256k1_v0_9_2_ellswift_sha256_init_create(&hash); - rustsecp256k1_v0_9_2_sha256_write(&hash, seckey32, 32); - rustsecp256k1_v0_9_2_sha256_write(&hash, zero32, sizeof(zero32)); - rustsecp256k1_v0_9_2_declassify(ctx, &hash, sizeof(hash)); /* private key is hashed now */ - if (auxrnd32) rustsecp256k1_v0_9_2_sha256_write(&hash, auxrnd32, 32); + * using BIP340 tagged hash with tag "rustsecp256k1_v0_10_0_ellswift_create". 
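For orientation, here is how rustsecp256k1_v0_10_0_ellswift_encode above and its decoding counterpart (a later hunk in this file) combine through the public API, mirroring the roundtrip test further down in this patch. A minimal sketch; context creation and key setup are assumed to have happened elsewhere:

#include <secp256k1.h>
#include <secp256k1_ellswift.h>

/* Encode a public key into 64 bytes that are indistinguishable from
 * random, then decode them back. rnd32 only randomizes which of the many
 * valid encodings is produced; decoding cannot fail, since every 64-byte
 * string maps to some curve point. */
static int ellswift_roundtrip(const rustsecp256k1_v0_10_0_context *ctx,
                              const rustsecp256k1_v0_10_0_pubkey *pk,
                              const unsigned char rnd32[32]) {
    unsigned char ell64[64];
    rustsecp256k1_v0_10_0_pubkey decoded;
    if (!rustsecp256k1_v0_10_0_ellswift_encode(ctx, ell64, pk, rnd32)) return 0;
    return rustsecp256k1_v0_10_0_ellswift_decode(ctx, &decoded, ell64);
}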
*/ + rustsecp256k1_v0_10_0_ellswift_sha256_init_create(&hash); + rustsecp256k1_v0_10_0_sha256_write(&hash, seckey32, 32); + rustsecp256k1_v0_10_0_sha256_write(&hash, zero32, sizeof(zero32)); + rustsecp256k1_v0_10_0_declassify(ctx, &hash, sizeof(hash)); /* private key is hashed now */ + if (auxrnd32) rustsecp256k1_v0_10_0_sha256_write(&hash, auxrnd32, 32); /* Compute ElligatorSwift encoding and construct output. */ - rustsecp256k1_v0_9_2_ellswift_elligatorswift_var(ell64, &t, &p, &hash); /* puts u in ell64[0..32] */ - rustsecp256k1_v0_9_2_fe_get_b32(ell64 + 32, &t); /* puts t in ell64[32..64] */ + rustsecp256k1_v0_10_0_ellswift_elligatorswift_var(ell64, &t, &p, &hash); /* puts u in ell64[0..32] */ + rustsecp256k1_v0_10_0_fe_get_b32(ell64 + 32, &t); /* puts t in ell64[32..64] */ - rustsecp256k1_v0_9_2_memczero(ell64, 64, !ret); - rustsecp256k1_v0_9_2_scalar_clear(&seckey_scalar); + rustsecp256k1_v0_10_0_memczero(ell64, 64, !ret); + rustsecp256k1_v0_10_0_scalar_clear(&seckey_scalar); return ret; } -int rustsecp256k1_v0_9_2_ellswift_decode(const rustsecp256k1_v0_9_2_context *ctx, rustsecp256k1_v0_9_2_pubkey *pubkey, const unsigned char *ell64) { - rustsecp256k1_v0_9_2_fe u, t; - rustsecp256k1_v0_9_2_ge p; +int rustsecp256k1_v0_10_0_ellswift_decode(const rustsecp256k1_v0_10_0_context *ctx, rustsecp256k1_v0_10_0_pubkey *pubkey, const unsigned char *ell64) { + rustsecp256k1_v0_10_0_fe u, t; + rustsecp256k1_v0_10_0_ge p; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); ARG_CHECK(ell64 != NULL); - rustsecp256k1_v0_9_2_fe_set_b32_mod(&u, ell64); - rustsecp256k1_v0_9_2_fe_set_b32_mod(&t, ell64 + 32); - rustsecp256k1_v0_9_2_fe_normalize_var(&t); - rustsecp256k1_v0_9_2_ellswift_swiftec_var(&p, &u, &t); - rustsecp256k1_v0_9_2_pubkey_save(pubkey, &p); + rustsecp256k1_v0_10_0_fe_set_b32_mod(&u, ell64); + rustsecp256k1_v0_10_0_fe_set_b32_mod(&t, ell64 + 32); + rustsecp256k1_v0_10_0_fe_normalize_var(&t); + rustsecp256k1_v0_10_0_ellswift_swiftec_var(&p, &u, &t); + rustsecp256k1_v0_10_0_pubkey_save(pubkey, &p); return 1; } static int ellswift_xdh_hash_function_prefix(unsigned char *output, const unsigned char *x32, const unsigned char *ell_a64, const unsigned char *ell_b64, void *data) { - rustsecp256k1_v0_9_2_sha256 sha; + rustsecp256k1_v0_10_0_sha256 sha; - rustsecp256k1_v0_9_2_sha256_initialize(&sha); - rustsecp256k1_v0_9_2_sha256_write(&sha, data, 64); - rustsecp256k1_v0_9_2_sha256_write(&sha, ell_a64, 64); - rustsecp256k1_v0_9_2_sha256_write(&sha, ell_b64, 64); - rustsecp256k1_v0_9_2_sha256_write(&sha, x32, 32); - rustsecp256k1_v0_9_2_sha256_finalize(&sha, output); + rustsecp256k1_v0_10_0_sha256_initialize(&sha); + rustsecp256k1_v0_10_0_sha256_write(&sha, data, 64); + rustsecp256k1_v0_10_0_sha256_write(&sha, ell_a64, 64); + rustsecp256k1_v0_10_0_sha256_write(&sha, ell_b64, 64); + rustsecp256k1_v0_10_0_sha256_write(&sha, x32, 32); + rustsecp256k1_v0_10_0_sha256_finalize(&sha, output); return 1; } /** Set hash state to the BIP340 tagged hash midstate for "bip324_ellswift_xonly_ecdh". 
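The hard-coded hash->s[0..7] words in these init_* functions are precomputed BIP340 tagged-hash midstates: the SHA256 state after compressing the 64-byte block SHA256(tag) || SHA256(tag), with hash->bytes left at 64 so subsequent writes continue from a block boundary. A sketch of deriving such a midstate at runtime with the library's internal hasher (equivalent in effect to the baked-in constants):

#include <stddef.h>

/* Compute the BIP340 tagged-hash midstate for `tag`; the init_* functions
 * above simply inline the resulting state words as constants. */
static void tagged_midstate(rustsecp256k1_v0_10_0_sha256 *hash,
                            const unsigned char *tag, size_t taglen) {
    unsigned char taghash[32];
    rustsecp256k1_v0_10_0_sha256 sha;
    rustsecp256k1_v0_10_0_sha256_initialize(&sha);
    rustsecp256k1_v0_10_0_sha256_write(&sha, tag, taglen);
    rustsecp256k1_v0_10_0_sha256_finalize(&sha, taghash);
    rustsecp256k1_v0_10_0_sha256_initialize(hash);
    rustsecp256k1_v0_10_0_sha256_write(hash, taghash, 32);
    rustsecp256k1_v0_10_0_sha256_write(hash, taghash, 32); /* hash->bytes == 64 */
}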
*/ -static void rustsecp256k1_v0_9_2_ellswift_sha256_init_bip324(rustsecp256k1_v0_9_2_sha256* hash) { - rustsecp256k1_v0_9_2_sha256_initialize(hash); +static void rustsecp256k1_v0_10_0_ellswift_sha256_init_bip324(rustsecp256k1_v0_10_0_sha256* hash) { + rustsecp256k1_v0_10_0_sha256_initialize(hash); hash->s[0] = 0x8c12d730ul; hash->s[1] = 0x827bd392ul; hash->s[2] = 0x9e4fb2eeul; @@ -529,27 +530,27 @@ static void rustsecp256k1_v0_9_2_ellswift_sha256_init_bip324(rustsecp256k1_v0_9_ } static int ellswift_xdh_hash_function_bip324(unsigned char* output, const unsigned char *x32, const unsigned char *ell_a64, const unsigned char *ell_b64, void *data) { - rustsecp256k1_v0_9_2_sha256 sha; + rustsecp256k1_v0_10_0_sha256 sha; (void)data; - rustsecp256k1_v0_9_2_ellswift_sha256_init_bip324(&sha); - rustsecp256k1_v0_9_2_sha256_write(&sha, ell_a64, 64); - rustsecp256k1_v0_9_2_sha256_write(&sha, ell_b64, 64); - rustsecp256k1_v0_9_2_sha256_write(&sha, x32, 32); - rustsecp256k1_v0_9_2_sha256_finalize(&sha, output); + rustsecp256k1_v0_10_0_ellswift_sha256_init_bip324(&sha); + rustsecp256k1_v0_10_0_sha256_write(&sha, ell_a64, 64); + rustsecp256k1_v0_10_0_sha256_write(&sha, ell_b64, 64); + rustsecp256k1_v0_10_0_sha256_write(&sha, x32, 32); + rustsecp256k1_v0_10_0_sha256_finalize(&sha, output); return 1; } -const rustsecp256k1_v0_9_2_ellswift_xdh_hash_function rustsecp256k1_v0_9_2_ellswift_xdh_hash_function_prefix = ellswift_xdh_hash_function_prefix; -const rustsecp256k1_v0_9_2_ellswift_xdh_hash_function rustsecp256k1_v0_9_2_ellswift_xdh_hash_function_bip324 = ellswift_xdh_hash_function_bip324; +const rustsecp256k1_v0_10_0_ellswift_xdh_hash_function rustsecp256k1_v0_10_0_ellswift_xdh_hash_function_prefix = ellswift_xdh_hash_function_prefix; +const rustsecp256k1_v0_10_0_ellswift_xdh_hash_function rustsecp256k1_v0_10_0_ellswift_xdh_hash_function_bip324 = ellswift_xdh_hash_function_bip324; -int rustsecp256k1_v0_9_2_ellswift_xdh(const rustsecp256k1_v0_9_2_context *ctx, unsigned char *output, const unsigned char *ell_a64, const unsigned char *ell_b64, const unsigned char *seckey32, int party, rustsecp256k1_v0_9_2_ellswift_xdh_hash_function hashfp, void *data) { +int rustsecp256k1_v0_10_0_ellswift_xdh(const rustsecp256k1_v0_10_0_context *ctx, unsigned char *output, const unsigned char *ell_a64, const unsigned char *ell_b64, const unsigned char *seckey32, int party, rustsecp256k1_v0_10_0_ellswift_xdh_hash_function hashfp, void *data) { int ret = 0; int overflow; - rustsecp256k1_v0_9_2_scalar s; - rustsecp256k1_v0_9_2_fe xn, xd, px, u, t; + rustsecp256k1_v0_10_0_scalar s; + rustsecp256k1_v0_10_0_fe xn, xd, px, u, t; unsigned char sx[32]; const unsigned char* theirs64; @@ -562,26 +563,26 @@ int rustsecp256k1_v0_9_2_ellswift_xdh(const rustsecp256k1_v0_9_2_context *ctx, u /* Load remote public key (as fraction). */ theirs64 = party ? ell_a64 : ell_b64; - rustsecp256k1_v0_9_2_fe_set_b32_mod(&u, theirs64); - rustsecp256k1_v0_9_2_fe_set_b32_mod(&t, theirs64 + 32); - rustsecp256k1_v0_9_2_ellswift_xswiftec_frac_var(&xn, &xd, &u, &t); + rustsecp256k1_v0_10_0_fe_set_b32_mod(&u, theirs64); + rustsecp256k1_v0_10_0_fe_set_b32_mod(&t, theirs64 + 32); + rustsecp256k1_v0_10_0_ellswift_xswiftec_frac_var(&xn, &xd, &u, &t); /* Load private key (using one if invalid). 
*/ - rustsecp256k1_v0_9_2_scalar_set_b32(&s, seckey32, &overflow); - overflow = rustsecp256k1_v0_9_2_scalar_is_zero(&s); - rustsecp256k1_v0_9_2_scalar_cmov(&s, &rustsecp256k1_v0_9_2_scalar_one, overflow); + rustsecp256k1_v0_10_0_scalar_set_b32(&s, seckey32, &overflow); + overflow = rustsecp256k1_v0_10_0_scalar_is_zero(&s); + rustsecp256k1_v0_10_0_scalar_cmov(&s, &rustsecp256k1_v0_10_0_scalar_one, overflow); /* Compute shared X coordinate. */ - rustsecp256k1_v0_9_2_ecmult_const_xonly(&px, &xn, &xd, &s, 1); - rustsecp256k1_v0_9_2_fe_normalize(&px); - rustsecp256k1_v0_9_2_fe_get_b32(sx, &px); + rustsecp256k1_v0_10_0_ecmult_const_xonly(&px, &xn, &xd, &s, 1); + rustsecp256k1_v0_10_0_fe_normalize(&px); + rustsecp256k1_v0_10_0_fe_get_b32(sx, &px); /* Invoke hasher */ ret = hashfp(output, sx, ell_a64, ell_b64, data); memset(sx, 0, 32); - rustsecp256k1_v0_9_2_fe_clear(&px); - rustsecp256k1_v0_9_2_scalar_clear(&s); + rustsecp256k1_v0_10_0_fe_clear(&px); + rustsecp256k1_v0_10_0_scalar_clear(&s); return !!ret & !overflow; } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ellswift/tests_exhaustive_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/ellswift/tests_exhaustive_impl.h index 694274d53..f6f023861 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ellswift/tests_exhaustive_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/ellswift/tests_exhaustive_impl.h @@ -9,7 +9,7 @@ #include "../../../include/secp256k1_ellswift.h" #include "main_impl.h" -static void test_exhaustive_ellswift(const rustsecp256k1_v0_9_2_context *ctx, const rustsecp256k1_v0_9_2_ge *group) { +static void test_exhaustive_ellswift(const rustsecp256k1_v0_10_0_context *ctx, const rustsecp256k1_v0_10_0_ge *group) { int i; /* Note that SwiftEC/ElligatorSwift are inherently curve operations, not @@ -18,21 +18,21 @@ static void test_exhaustive_ellswift(const rustsecp256k1_v0_9_2_context *ctx, co * it doesn't (and for computational reasons obviously cannot) test the * entire domain ellswift operates under. */ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_9_2_scalar scalar_i; + rustsecp256k1_v0_10_0_scalar scalar_i; unsigned char sec32[32]; unsigned char ell64[64]; - rustsecp256k1_v0_9_2_pubkey pub_decoded; - rustsecp256k1_v0_9_2_ge ge_decoded; + rustsecp256k1_v0_10_0_pubkey pub_decoded; + rustsecp256k1_v0_10_0_ge ge_decoded; /* Construct ellswift pubkey from exhaustive loop scalar i. */ - rustsecp256k1_v0_9_2_scalar_set_int(&scalar_i, i); - rustsecp256k1_v0_9_2_scalar_get_b32(sec32, &scalar_i); - CHECK(rustsecp256k1_v0_9_2_ellswift_create(ctx, ell64, sec32, NULL)); + rustsecp256k1_v0_10_0_scalar_set_int(&scalar_i, i); + rustsecp256k1_v0_10_0_scalar_get_b32(sec32, &scalar_i); + CHECK(rustsecp256k1_v0_10_0_ellswift_create(ctx, ell64, sec32, NULL)); /* Decode ellswift pubkey and check that it matches the precomputed group element. 
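Seen from the caller's side, rustsecp256k1_v0_10_0_ellswift_xdh above gives both endpoints the same 32-byte secret as long as they pass the two encodings in the same initiator/responder order. A hedged sketch of a BIP324-style exchange; the wrapper name and the we_initiated convention are illustrative:

#include <secp256k1.h>
#include <secp256k1_ellswift.h>

/* One side of a BIP324-style handshake. The initiator is party A, so
 * ell_a64 must be the initiator's encoding on both sides; party is 0 for
 * A and 1 for B. */
static int bip324_shared_secret(const rustsecp256k1_v0_10_0_context *ctx,
                                unsigned char out32[32],
                                const unsigned char ell_ours[64],
                                const unsigned char ell_theirs[64],
                                const unsigned char seckey32[32],
                                int we_initiated) {
    const unsigned char *ell_a = we_initiated ? ell_ours : ell_theirs;
    const unsigned char *ell_b = we_initiated ? ell_theirs : ell_ours;
    return rustsecp256k1_v0_10_0_ellswift_xdh(ctx, out32, ell_a, ell_b,
                                              seckey32, !we_initiated,
                                              rustsecp256k1_v0_10_0_ellswift_xdh_hash_function_bip324,
                                              NULL);
}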
*/ - rustsecp256k1_v0_9_2_ellswift_decode(ctx, &pub_decoded, ell64); - rustsecp256k1_v0_9_2_pubkey_load(ctx, &ge_decoded, &pub_decoded); - ge_equals_ge(&ge_decoded, &group[i]); + rustsecp256k1_v0_10_0_ellswift_decode(ctx, &pub_decoded, ell64); + rustsecp256k1_v0_10_0_pubkey_load(ctx, &ge_decoded, &pub_decoded); + CHECK(rustsecp256k1_v0_10_0_ge_eq_var(&ge_decoded, &group[i])); } } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ellswift/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/ellswift/tests_impl.h index 9c9e0292b..53ec88d39 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ellswift/tests_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/ellswift/tests_impl.h @@ -10,14 +10,14 @@ struct ellswift_xswiftec_inv_test { int enc_bitmap; - rustsecp256k1_v0_9_2_fe u; - rustsecp256k1_v0_9_2_fe x; - rustsecp256k1_v0_9_2_fe encs[8]; + rustsecp256k1_v0_10_0_fe u; + rustsecp256k1_v0_10_0_fe x; + rustsecp256k1_v0_10_0_fe encs[8]; }; struct ellswift_decode_test { unsigned char enc[64]; - rustsecp256k1_v0_9_2_fe x; + rustsecp256k1_v0_10_0_fe x; int odd_y; }; @@ -183,28 +183,28 @@ void run_ellswift_tests(void) { const struct ellswift_xswiftec_inv_test *testcase = &ellswift_xswiftec_inv_tests[i]; int c; for (c = 0; c < 8; ++c) { - rustsecp256k1_v0_9_2_fe t; - int ret = rustsecp256k1_v0_9_2_ellswift_xswiftec_inv_var(&t, &testcase->x, &testcase->u, c); + rustsecp256k1_v0_10_0_fe t; + int ret = rustsecp256k1_v0_10_0_ellswift_xswiftec_inv_var(&t, &testcase->x, &testcase->u, c); CHECK(ret == ((testcase->enc_bitmap >> c) & 1)); if (ret) { - rustsecp256k1_v0_9_2_fe x2; + rustsecp256k1_v0_10_0_fe x2; CHECK(check_fe_equal(&t, &testcase->encs[c])); - rustsecp256k1_v0_9_2_ellswift_xswiftec_var(&x2, &testcase->u, &testcase->encs[c]); + rustsecp256k1_v0_10_0_ellswift_xswiftec_var(&x2, &testcase->u, &testcase->encs[c]); CHECK(check_fe_equal(&testcase->x, &x2)); } } } for (i = 0; (unsigned)i < sizeof(ellswift_decode_tests) / sizeof(ellswift_decode_tests[0]); ++i) { const struct ellswift_decode_test *testcase = &ellswift_decode_tests[i]; - rustsecp256k1_v0_9_2_pubkey pubkey; - rustsecp256k1_v0_9_2_ge ge; + rustsecp256k1_v0_10_0_pubkey pubkey; + rustsecp256k1_v0_10_0_ge ge; int ret; - ret = rustsecp256k1_v0_9_2_ellswift_decode(CTX, &pubkey, testcase->enc); + ret = rustsecp256k1_v0_10_0_ellswift_decode(CTX, &pubkey, testcase->enc); CHECK(ret); - ret = rustsecp256k1_v0_9_2_pubkey_load(CTX, &ge, &pubkey); + ret = rustsecp256k1_v0_10_0_pubkey_load(CTX, &ge, &pubkey); CHECK(ret); CHECK(check_fe_equal(&testcase->x, &ge.x)); - CHECK(rustsecp256k1_v0_9_2_fe_is_odd(&ge.y) == testcase->odd_y); + CHECK(rustsecp256k1_v0_10_0_fe_is_odd(&ge.y) == testcase->odd_y); } for (i = 0; (unsigned)i < sizeof(ellswift_xdh_tests_bip324) / sizeof(ellswift_xdh_tests_bip324[0]); ++i) { const struct ellswift_xdh_test *test = &ellswift_xdh_tests_bip324[i]; @@ -213,94 +213,94 @@ void run_ellswift_tests(void) { int party = !test->initiating; const unsigned char* ell_a64 = party ? test->ellswift_theirs : test->ellswift_ours; const unsigned char* ell_b64 = party ? 
test->ellswift_ours : test->ellswift_theirs; - ret = rustsecp256k1_v0_9_2_ellswift_xdh(CTX, shared_secret, + ret = rustsecp256k1_v0_10_0_ellswift_xdh(CTX, shared_secret, ell_a64, ell_b64, test->priv_ours, party, - rustsecp256k1_v0_9_2_ellswift_xdh_hash_function_bip324, + rustsecp256k1_v0_10_0_ellswift_xdh_hash_function_bip324, NULL); CHECK(ret); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(shared_secret, test->shared_secret, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(shared_secret, test->shared_secret, 32) == 0); } - /* Verify that rustsecp256k1_v0_9_2_ellswift_encode + decode roundtrips. */ + /* Verify that rustsecp256k1_v0_10_0_ellswift_encode + decode roundtrips. */ for (i = 0; i < 1000 * COUNT; i++) { unsigned char rnd32[32]; unsigned char ell64[64]; - rustsecp256k1_v0_9_2_ge g, g2; - rustsecp256k1_v0_9_2_pubkey pubkey, pubkey2; + rustsecp256k1_v0_10_0_ge g, g2; + rustsecp256k1_v0_10_0_pubkey pubkey, pubkey2; /* Generate random public key and random randomizer. */ random_group_element_test(&g); - rustsecp256k1_v0_9_2_pubkey_save(&pubkey, &g); - rustsecp256k1_v0_9_2_testrand256(rnd32); + rustsecp256k1_v0_10_0_pubkey_save(&pubkey, &g); + rustsecp256k1_v0_10_0_testrand256(rnd32); /* Convert the public key to ElligatorSwift and back. */ - rustsecp256k1_v0_9_2_ellswift_encode(CTX, ell64, &pubkey, rnd32); - rustsecp256k1_v0_9_2_ellswift_decode(CTX, &pubkey2, ell64); - rustsecp256k1_v0_9_2_pubkey_load(CTX, &g2, &pubkey2); + rustsecp256k1_v0_10_0_ellswift_encode(CTX, ell64, &pubkey, rnd32); + rustsecp256k1_v0_10_0_ellswift_decode(CTX, &pubkey2, ell64); + rustsecp256k1_v0_10_0_pubkey_load(CTX, &g2, &pubkey2); /* Compare with original. */ - ge_equals_ge(&g, &g2); + CHECK(rustsecp256k1_v0_10_0_ge_eq_var(&g, &g2)); } - /* Verify the behavior of rustsecp256k1_v0_9_2_ellswift_create */ + /* Verify the behavior of rustsecp256k1_v0_10_0_ellswift_create */ for (i = 0; i < 400 * COUNT; i++) { unsigned char auxrnd32[32], sec32[32]; - rustsecp256k1_v0_9_2_scalar sec; - rustsecp256k1_v0_9_2_gej res; - rustsecp256k1_v0_9_2_ge dec; - rustsecp256k1_v0_9_2_pubkey pub; + rustsecp256k1_v0_10_0_scalar sec; + rustsecp256k1_v0_10_0_gej res; + rustsecp256k1_v0_10_0_ge dec; + rustsecp256k1_v0_10_0_pubkey pub; unsigned char ell64[64]; int ret; /* Generate random secret key and random randomizer. */ - if (i & 1) rustsecp256k1_v0_9_2_testrand256_test(auxrnd32); + if (i & 1) rustsecp256k1_v0_10_0_testrand256_test(auxrnd32); random_scalar_order_test(&sec); - rustsecp256k1_v0_9_2_scalar_get_b32(sec32, &sec); + rustsecp256k1_v0_10_0_scalar_get_b32(sec32, &sec); /* Construct ElligatorSwift-encoded public keys for that key. */ - ret = rustsecp256k1_v0_9_2_ellswift_create(CTX, ell64, sec32, (i & 1) ? auxrnd32 : NULL); + ret = rustsecp256k1_v0_10_0_ellswift_create(CTX, ell64, sec32, (i & 1) ? auxrnd32 : NULL); CHECK(ret); /* Decode it, and compare with traditionally-computed public key. */ - rustsecp256k1_v0_9_2_ellswift_decode(CTX, &pub, ell64); - rustsecp256k1_v0_9_2_pubkey_load(CTX, &dec, &pub); - rustsecp256k1_v0_9_2_ecmult(&res, NULL, &rustsecp256k1_v0_9_2_scalar_zero, &sec); - ge_equals_gej(&dec, &res); + rustsecp256k1_v0_10_0_ellswift_decode(CTX, &pub, ell64); + rustsecp256k1_v0_10_0_pubkey_load(CTX, &dec, &pub); + rustsecp256k1_v0_10_0_ecmult(&res, NULL, &rustsecp256k1_v0_10_0_scalar_zero, &sec); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&res, &dec)); } - /* Verify that rustsecp256k1_v0_9_2_ellswift_xdh computes the right shared X coordinate. 
*/ + /* Verify that rustsecp256k1_v0_10_0_ellswift_xdh computes the right shared X coordinate. */ for (i = 0; i < 800 * COUNT; i++) { unsigned char ell64[64], sec32[32], share32[32]; - rustsecp256k1_v0_9_2_scalar sec; - rustsecp256k1_v0_9_2_ge dec, res; - rustsecp256k1_v0_9_2_fe share_x; - rustsecp256k1_v0_9_2_gej decj, resj; - rustsecp256k1_v0_9_2_pubkey pub; + rustsecp256k1_v0_10_0_scalar sec; + rustsecp256k1_v0_10_0_ge dec, res; + rustsecp256k1_v0_10_0_fe share_x; + rustsecp256k1_v0_10_0_gej decj, resj; + rustsecp256k1_v0_10_0_pubkey pub; int ret; /* Generate random secret key. */ random_scalar_order_test(&sec); - rustsecp256k1_v0_9_2_scalar_get_b32(sec32, &sec); + rustsecp256k1_v0_10_0_scalar_get_b32(sec32, &sec); /* Generate random ElligatorSwift encoding for the remote key and decode it. */ - rustsecp256k1_v0_9_2_testrand256_test(ell64); - rustsecp256k1_v0_9_2_testrand256_test(ell64 + 32); - rustsecp256k1_v0_9_2_ellswift_decode(CTX, &pub, ell64); - rustsecp256k1_v0_9_2_pubkey_load(CTX, &dec, &pub); - rustsecp256k1_v0_9_2_gej_set_ge(&decj, &dec); + rustsecp256k1_v0_10_0_testrand256_test(ell64); + rustsecp256k1_v0_10_0_testrand256_test(ell64 + 32); + rustsecp256k1_v0_10_0_ellswift_decode(CTX, &pub, ell64); + rustsecp256k1_v0_10_0_pubkey_load(CTX, &dec, &pub); + rustsecp256k1_v0_10_0_gej_set_ge(&decj, &dec); /* Compute the X coordinate of seckey*pubkey using ellswift_xdh. Note that we * pass ell64 as claimed (but incorrect) encoding for sec32 here; this works * because the "hasher" function we use here ignores the ell64 arguments. */ - ret = rustsecp256k1_v0_9_2_ellswift_xdh(CTX, share32, ell64, ell64, sec32, i & 1, &ellswift_xdh_hash_x32, NULL); + ret = rustsecp256k1_v0_10_0_ellswift_xdh(CTX, share32, ell64, ell64, sec32, i & 1, &ellswift_xdh_hash_x32, NULL); CHECK(ret); - (void)rustsecp256k1_v0_9_2_fe_set_b32_limit(&share_x, share32); /* no overflow is possible */ - rustsecp256k1_v0_9_2_fe_verify(&share_x); + (void)rustsecp256k1_v0_10_0_fe_set_b32_limit(&share_x, share32); /* no overflow is possible */ + SECP256K1_FE_VERIFY(&share_x); /* Compute seckey*pubkey directly. */ - rustsecp256k1_v0_9_2_ecmult(&resj, &decj, &sec, NULL); - rustsecp256k1_v0_9_2_ge_set_gej(&res, &resj); + rustsecp256k1_v0_10_0_ecmult(&resj, &decj, &sec, NULL); + rustsecp256k1_v0_10_0_ge_set_gej(&res, &resj); /* Compare. 
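As the comment above notes, this check passes a test-only hasher that ignores the `ell_a64`/`ell_b64` arguments, so deliberately mismatched encodings are harmless there. Any callback matching the `secp256k1_ellswift_xdh_hash_function` typedef (upstream naming) can be supplied; a pass-through variant, presumably equivalent to the test's `ellswift_xdh_hash_x32`, would look like this (a sketch, not the test's actual definition):

    #include <string.h>

    /* Write the raw shared x coordinate to output, ignoring the encodings
     * and the user data pointer. Fine for tests; real protocols should
     * commit to ell_a64/ell_b64 as the BIP324 hasher does. Returns 1 to
     * signal success to secp256k1_ellswift_xdh. */
    static int xdh_hash_passthrough(unsigned char *output, const unsigned char *x32,
                                    const unsigned char *ell_a64,
                                    const unsigned char *ell_b64, void *data) {
        (void)ell_a64; (void)ell_b64; (void)data;
        memcpy(output, x32, 32);
        return 1;
    }
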
*/ CHECK(check_fe_equal(&res.x, &share_x)); } - /* Verify the joint behavior of rustsecp256k1_v0_9_2_ellswift_xdh */ + /* Verify the joint behavior of rustsecp256k1_v0_10_0_ellswift_xdh */ for (i = 0; i < 200 * COUNT; i++) { unsigned char auxrnd32a[32], auxrnd32b[32], auxrnd32a_bad[32], auxrnd32b_bad[32]; unsigned char sec32a[32], sec32b[32], sec32a_bad[32], sec32b_bad[32]; - rustsecp256k1_v0_9_2_scalar seca, secb; + rustsecp256k1_v0_10_0_scalar seca, secb; unsigned char ell64a[64], ell64b[64], ell64a_bad[64], ell64b_bad[64]; unsigned char share32a[32], share32b[32], share32_bad[32]; unsigned char prefix64[64]; - rustsecp256k1_v0_9_2_ellswift_xdh_hash_function hash_function; + rustsecp256k1_v0_10_0_ellswift_xdh_hash_function hash_function; void* data; int ret; @@ -309,126 +309,126 @@ void run_ellswift_tests(void) { hash_function = ellswift_xdh_hash_x32; data = NULL; } else if ((i % 3) == 1) { - hash_function = rustsecp256k1_v0_9_2_ellswift_xdh_hash_function_bip324; + hash_function = rustsecp256k1_v0_10_0_ellswift_xdh_hash_function_bip324; data = NULL; } else { - hash_function = rustsecp256k1_v0_9_2_ellswift_xdh_hash_function_prefix; - rustsecp256k1_v0_9_2_testrand256_test(prefix64); - rustsecp256k1_v0_9_2_testrand256_test(prefix64 + 32); + hash_function = rustsecp256k1_v0_10_0_ellswift_xdh_hash_function_prefix; + rustsecp256k1_v0_10_0_testrand256_test(prefix64); + rustsecp256k1_v0_10_0_testrand256_test(prefix64 + 32); data = prefix64; } /* Generate random secret keys and random randomizers. */ - rustsecp256k1_v0_9_2_testrand256_test(auxrnd32a); - rustsecp256k1_v0_9_2_testrand256_test(auxrnd32b); + rustsecp256k1_v0_10_0_testrand256_test(auxrnd32a); + rustsecp256k1_v0_10_0_testrand256_test(auxrnd32b); random_scalar_order_test(&seca); /* Draw secb uniformly at random to make sure that the secret keys * differ */ random_scalar_order(&secb); - rustsecp256k1_v0_9_2_scalar_get_b32(sec32a, &seca); - rustsecp256k1_v0_9_2_scalar_get_b32(sec32b, &secb); + rustsecp256k1_v0_10_0_scalar_get_b32(sec32a, &seca); + rustsecp256k1_v0_10_0_scalar_get_b32(sec32b, &secb); /* Construct ElligatorSwift-encoded public keys for those keys. */ /* For A: */ - ret = rustsecp256k1_v0_9_2_ellswift_create(CTX, ell64a, sec32a, auxrnd32a); + ret = rustsecp256k1_v0_10_0_ellswift_create(CTX, ell64a, sec32a, auxrnd32a); CHECK(ret); /* For B: */ - ret = rustsecp256k1_v0_9_2_ellswift_create(CTX, ell64b, sec32b, auxrnd32b); + ret = rustsecp256k1_v0_10_0_ellswift_create(CTX, ell64b, sec32b, auxrnd32b); CHECK(ret); /* Compute the shared secret both ways and compare with each other. */ /* For A: */ - ret = rustsecp256k1_v0_9_2_ellswift_xdh(CTX, share32a, ell64a, ell64b, sec32a, 0, hash_function, data); + ret = rustsecp256k1_v0_10_0_ellswift_xdh(CTX, share32a, ell64a, ell64b, sec32a, 0, hash_function, data); CHECK(ret); /* For B: */ - ret = rustsecp256k1_v0_9_2_ellswift_xdh(CTX, share32b, ell64a, ell64b, sec32b, 1, hash_function, data); + ret = rustsecp256k1_v0_10_0_ellswift_xdh(CTX, share32b, ell64a, ell64b, sec32b, 1, hash_function, data); CHECK(ret); /* And compare: */ - CHECK(rustsecp256k1_v0_9_2_memcmp_var(share32a, share32b, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(share32a, share32b, 32) == 0); /* Verify that the shared secret doesn't match if other side's public key is incorrect. 
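The joint-behavior loop above rotates through three hashers: the raw-x test hasher, the BIP324 hasher, and the prefix hasher, whose `data` argument must point to a caller-chosen 64-byte domain-separation prefix that is hashed together with both encodings and the shared x. A hedged usage wrapper for the prefix variant (upstream names; `xdh_with_prefix` is an illustrative helper, not library API):

    #include <secp256k1.h>
    #include <secp256k1_ellswift.h>

    /* Derive a 32-byte secret using the library's prefix hasher;
     * prefix64 must be exactly 64 bytes. Returns 1 on success. */
    static int xdh_with_prefix(const secp256k1_context *ctx, unsigned char share32[32],
                               const unsigned char ell_a64[64], const unsigned char ell_b64[64],
                               const unsigned char seckey32[32], int party,
                               unsigned char prefix64[64]) {
        return secp256k1_ellswift_xdh(ctx, share32, ell_a64, ell_b64, seckey32, party,
                                      secp256k1_ellswift_xdh_hash_function_prefix, prefix64);
    }
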
*/ /* For A (using a bad public key for B): */ memcpy(ell64b_bad, ell64b, sizeof(ell64a_bad)); - rustsecp256k1_v0_9_2_testrand_flip(ell64b_bad, sizeof(ell64b_bad)); - ret = rustsecp256k1_v0_9_2_ellswift_xdh(CTX, share32_bad, ell64a, ell64b_bad, sec32a, 0, hash_function, data); - CHECK(ret); /* Mismatching encodings don't get detected by rustsecp256k1_v0_9_2_ellswift_xdh. */ - CHECK(rustsecp256k1_v0_9_2_memcmp_var(share32_bad, share32a, 32) != 0); + rustsecp256k1_v0_10_0_testrand_flip(ell64b_bad, sizeof(ell64b_bad)); + ret = rustsecp256k1_v0_10_0_ellswift_xdh(CTX, share32_bad, ell64a, ell64b_bad, sec32a, 0, hash_function, data); + CHECK(ret); /* Mismatching encodings don't get detected by rustsecp256k1_v0_10_0_ellswift_xdh. */ + CHECK(rustsecp256k1_v0_10_0_memcmp_var(share32_bad, share32a, 32) != 0); /* For B (using a bad public key for A): */ memcpy(ell64a_bad, ell64a, sizeof(ell64a_bad)); - rustsecp256k1_v0_9_2_testrand_flip(ell64a_bad, sizeof(ell64a_bad)); - ret = rustsecp256k1_v0_9_2_ellswift_xdh(CTX, share32_bad, ell64a_bad, ell64b, sec32b, 1, hash_function, data); + rustsecp256k1_v0_10_0_testrand_flip(ell64a_bad, sizeof(ell64a_bad)); + ret = rustsecp256k1_v0_10_0_ellswift_xdh(CTX, share32_bad, ell64a_bad, ell64b, sec32b, 1, hash_function, data); CHECK(ret); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(share32_bad, share32b, 32) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(share32_bad, share32b, 32) != 0); /* Verify that the shared secret doesn't match if the private key is incorrect. */ /* For A: */ memcpy(sec32a_bad, sec32a, sizeof(sec32a_bad)); - rustsecp256k1_v0_9_2_testrand_flip(sec32a_bad, sizeof(sec32a_bad)); - ret = rustsecp256k1_v0_9_2_ellswift_xdh(CTX, share32_bad, ell64a, ell64b, sec32a_bad, 0, hash_function, data); - CHECK(!ret || rustsecp256k1_v0_9_2_memcmp_var(share32_bad, share32a, 32) != 0); + rustsecp256k1_v0_10_0_testrand_flip(sec32a_bad, sizeof(sec32a_bad)); + ret = rustsecp256k1_v0_10_0_ellswift_xdh(CTX, share32_bad, ell64a, ell64b, sec32a_bad, 0, hash_function, data); + CHECK(!ret || rustsecp256k1_v0_10_0_memcmp_var(share32_bad, share32a, 32) != 0); /* For B: */ memcpy(sec32b_bad, sec32b, sizeof(sec32b_bad)); - rustsecp256k1_v0_9_2_testrand_flip(sec32b_bad, sizeof(sec32b_bad)); - ret = rustsecp256k1_v0_9_2_ellswift_xdh(CTX, share32_bad, ell64a, ell64b, sec32b_bad, 1, hash_function, data); - CHECK(!ret || rustsecp256k1_v0_9_2_memcmp_var(share32_bad, share32b, 32) != 0); + rustsecp256k1_v0_10_0_testrand_flip(sec32b_bad, sizeof(sec32b_bad)); + ret = rustsecp256k1_v0_10_0_ellswift_xdh(CTX, share32_bad, ell64a, ell64b, sec32b_bad, 1, hash_function, data); + CHECK(!ret || rustsecp256k1_v0_10_0_memcmp_var(share32_bad, share32b, 32) != 0); if (hash_function != ellswift_xdh_hash_x32) { /* Verify that the shared secret doesn't match when a different encoding of the same public key is used. 
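Why the block that follows is guarded by `hash_function != ellswift_xdh_hash_x32`: distinct `auxrnd32` values yield (with overwhelming probability) distinct 64-byte encodings of the same point, so only hashers that commit to the encodings can distinguish a re-encoding; the raw-x hasher cannot. A small sketch of that fact (upstream names; illustrative helper):

    #include <string.h>
    #include <secp256k1.h>
    #include <secp256k1_ellswift.h>

    /* Returns 1 iff two encodings of the same key decode to equal points,
     * which holds even though the encodings themselves typically differ. */
    static int same_point_different_encoding(const secp256k1_context *ctx,
                                             const unsigned char seckey32[32],
                                             const unsigned char aux1[32],
                                             const unsigned char aux2[32]) {
        unsigned char ell1[64], ell2[64], ser1[33], ser2[33];
        size_t len1 = sizeof(ser1), len2 = sizeof(ser2);
        secp256k1_pubkey p1, p2;
        if (!secp256k1_ellswift_create(ctx, ell1, seckey32, aux1)) return 0;
        if (!secp256k1_ellswift_create(ctx, ell2, seckey32, aux2)) return 0;
        if (!secp256k1_ellswift_decode(ctx, &p1, ell1)) return 0;
        if (!secp256k1_ellswift_decode(ctx, &p2, ell2)) return 0;
        if (!secp256k1_ec_pubkey_serialize(ctx, ser1, &len1, &p1, SECP256K1_EC_COMPRESSED)) return 0;
        if (!secp256k1_ec_pubkey_serialize(ctx, ser2, &len2, &p2, SECP256K1_EC_COMPRESSED)) return 0;
        return memcmp(ser1, ser2, 33) == 0;
    }
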
*/ /* For A (changing B's public key): */ memcpy(auxrnd32b_bad, auxrnd32b, sizeof(auxrnd32b_bad)); - rustsecp256k1_v0_9_2_testrand_flip(auxrnd32b_bad, sizeof(auxrnd32b_bad)); - ret = rustsecp256k1_v0_9_2_ellswift_create(CTX, ell64b_bad, sec32b, auxrnd32b_bad); + rustsecp256k1_v0_10_0_testrand_flip(auxrnd32b_bad, sizeof(auxrnd32b_bad)); + ret = rustsecp256k1_v0_10_0_ellswift_create(CTX, ell64b_bad, sec32b, auxrnd32b_bad); CHECK(ret); - ret = rustsecp256k1_v0_9_2_ellswift_xdh(CTX, share32_bad, ell64a, ell64b_bad, sec32a, 0, hash_function, data); + ret = rustsecp256k1_v0_10_0_ellswift_xdh(CTX, share32_bad, ell64a, ell64b_bad, sec32a, 0, hash_function, data); CHECK(ret); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(share32_bad, share32a, 32) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(share32_bad, share32a, 32) != 0); /* For B (changing A's public key): */ memcpy(auxrnd32a_bad, auxrnd32a, sizeof(auxrnd32a_bad)); - rustsecp256k1_v0_9_2_testrand_flip(auxrnd32a_bad, sizeof(auxrnd32a_bad)); - ret = rustsecp256k1_v0_9_2_ellswift_create(CTX, ell64a_bad, sec32a, auxrnd32a_bad); + rustsecp256k1_v0_10_0_testrand_flip(auxrnd32a_bad, sizeof(auxrnd32a_bad)); + ret = rustsecp256k1_v0_10_0_ellswift_create(CTX, ell64a_bad, sec32a, auxrnd32a_bad); CHECK(ret); - ret = rustsecp256k1_v0_9_2_ellswift_xdh(CTX, share32_bad, ell64a_bad, ell64b, sec32b, 1, hash_function, data); + ret = rustsecp256k1_v0_10_0_ellswift_xdh(CTX, share32_bad, ell64a_bad, ell64b, sec32b, 1, hash_function, data); CHECK(ret); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(share32_bad, share32b, 32) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(share32_bad, share32b, 32) != 0); /* Verify that swapping sides changes the shared secret. */ /* For A (claiming to be B): */ - ret = rustsecp256k1_v0_9_2_ellswift_xdh(CTX, share32_bad, ell64a, ell64b, sec32a, 1, hash_function, data); + ret = rustsecp256k1_v0_10_0_ellswift_xdh(CTX, share32_bad, ell64a, ell64b, sec32a, 1, hash_function, data); CHECK(ret); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(share32_bad, share32a, 32) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(share32_bad, share32a, 32) != 0); /* For B (claiming to be A): */ - ret = rustsecp256k1_v0_9_2_ellswift_xdh(CTX, share32_bad, ell64a, ell64b, sec32b, 0, hash_function, data); + ret = rustsecp256k1_v0_10_0_ellswift_xdh(CTX, share32_bad, ell64a, ell64b, sec32b, 0, hash_function, data); CHECK(ret); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(share32_bad, share32b, 32) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(share32_bad, share32b, 32) != 0); } } /* Test hash initializers. */ { - rustsecp256k1_v0_9_2_sha256 sha, sha_optimized; - static const unsigned char encode_tag[25] = "rustsecp256k1_v0_9_2_ellswift_encode"; - static const unsigned char create_tag[25] = "rustsecp256k1_v0_9_2_ellswift_create"; + rustsecp256k1_v0_10_0_sha256 sha, sha_optimized; + static const unsigned char encode_tag[25] = "rustsecp256k1_v0_10_0_ellswift_encode"; + static const unsigned char create_tag[25] = "rustsecp256k1_v0_10_0_ellswift_create"; static const unsigned char bip324_tag[26] = "bip324_ellswift_xonly_ecdh"; /* Check that hash initialized by - * rustsecp256k1_v0_9_2_ellswift_sha256_init_encode has the expected + * rustsecp256k1_v0_10_0_ellswift_sha256_init_encode has the expected * state. 
*/ - rustsecp256k1_v0_9_2_sha256_initialize_tagged(&sha, encode_tag, sizeof(encode_tag)); - rustsecp256k1_v0_9_2_ellswift_sha256_init_encode(&sha_optimized); + rustsecp256k1_v0_10_0_sha256_initialize_tagged(&sha, encode_tag, sizeof(encode_tag)); + rustsecp256k1_v0_10_0_ellswift_sha256_init_encode(&sha_optimized); test_sha256_eq(&sha, &sha_optimized); /* Check that hash initialized by - * rustsecp256k1_v0_9_2_ellswift_sha256_init_create has the expected + * rustsecp256k1_v0_10_0_ellswift_sha256_init_create has the expected * state. */ - rustsecp256k1_v0_9_2_sha256_initialize_tagged(&sha, create_tag, sizeof(create_tag)); - rustsecp256k1_v0_9_2_ellswift_sha256_init_create(&sha_optimized); + rustsecp256k1_v0_10_0_sha256_initialize_tagged(&sha, create_tag, sizeof(create_tag)); + rustsecp256k1_v0_10_0_ellswift_sha256_init_create(&sha_optimized); test_sha256_eq(&sha, &sha_optimized); /* Check that hash initialized by - * rustsecp256k1_v0_9_2_ellswift_sha256_init_bip324 has the expected + * rustsecp256k1_v0_10_0_ellswift_sha256_init_bip324 has the expected * state. */ - rustsecp256k1_v0_9_2_sha256_initialize_tagged(&sha, bip324_tag, sizeof(bip324_tag)); - rustsecp256k1_v0_9_2_ellswift_sha256_init_bip324(&sha_optimized); + rustsecp256k1_v0_10_0_sha256_initialize_tagged(&sha, bip324_tag, sizeof(bip324_tag)); + rustsecp256k1_v0_10_0_ellswift_sha256_init_bip324(&sha_optimized); test_sha256_eq(&sha, &sha_optimized); } } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include index 8fd144ab4..7893e24bb 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include +++ b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include @@ -1,4 +1,4 @@ -include_HEADERS += include/rustsecp256k1_v0_9_2_extrakeys.h +include_HEADERS += include/rustsecp256k1_v0_10_0_extrakeys.h noinst_HEADERS += src/modules/extrakeys/tests_impl.h noinst_HEADERS += src/modules/extrakeys/tests_exhaustive_impl.h noinst_HEADERS += src/modules/extrakeys/main_impl.h diff --git a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h index fc66627ed..ab5692cd0 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h @@ -11,54 +11,54 @@ #include "../../../include/secp256k1_extrakeys.h" #include "../../util.h" -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_xonly_pubkey_load(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_ge *ge, const rustsecp256k1_v0_9_2_xonly_pubkey *pubkey) { - return rustsecp256k1_v0_9_2_pubkey_load(ctx, ge, (const rustsecp256k1_v0_9_2_pubkey *) pubkey); +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_xonly_pubkey_load(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_ge *ge, const rustsecp256k1_v0_10_0_xonly_pubkey *pubkey) { + return rustsecp256k1_v0_10_0_pubkey_load(ctx, ge, (const rustsecp256k1_v0_10_0_pubkey *) pubkey); } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_xonly_pubkey_save(rustsecp256k1_v0_9_2_xonly_pubkey *pubkey, rustsecp256k1_v0_9_2_ge *ge) { - rustsecp256k1_v0_9_2_pubkey_save((rustsecp256k1_v0_9_2_pubkey *) pubkey, ge); +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_xonly_pubkey_save(rustsecp256k1_v0_10_0_xonly_pubkey *pubkey, rustsecp256k1_v0_10_0_ge *ge) { + rustsecp256k1_v0_10_0_pubkey_save((rustsecp256k1_v0_10_0_pubkey *) pubkey, ge); } -int 
rustsecp256k1_v0_9_2_xonly_pubkey_parse(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_xonly_pubkey *pubkey, const unsigned char *input32) { - rustsecp256k1_v0_9_2_ge pk; - rustsecp256k1_v0_9_2_fe x; +int rustsecp256k1_v0_10_0_xonly_pubkey_parse(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_xonly_pubkey *pubkey, const unsigned char *input32) { + rustsecp256k1_v0_10_0_ge pk; + rustsecp256k1_v0_10_0_fe x; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); ARG_CHECK(input32 != NULL); - if (!rustsecp256k1_v0_9_2_fe_set_b32_limit(&x, input32)) { + if (!rustsecp256k1_v0_10_0_fe_set_b32_limit(&x, input32)) { return 0; } - if (!rustsecp256k1_v0_9_2_ge_set_xo_var(&pk, &x, 0)) { + if (!rustsecp256k1_v0_10_0_ge_set_xo_var(&pk, &x, 0)) { return 0; } - if (!rustsecp256k1_v0_9_2_ge_is_in_correct_subgroup(&pk)) { + if (!rustsecp256k1_v0_10_0_ge_is_in_correct_subgroup(&pk)) { return 0; } - rustsecp256k1_v0_9_2_xonly_pubkey_save(pubkey, &pk); + rustsecp256k1_v0_10_0_xonly_pubkey_save(pubkey, &pk); return 1; } -int rustsecp256k1_v0_9_2_xonly_pubkey_serialize(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *output32, const rustsecp256k1_v0_9_2_xonly_pubkey *pubkey) { - rustsecp256k1_v0_9_2_ge pk; +int rustsecp256k1_v0_10_0_xonly_pubkey_serialize(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *output32, const rustsecp256k1_v0_10_0_xonly_pubkey *pubkey) { + rustsecp256k1_v0_10_0_ge pk; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output32 != NULL); memset(output32, 0, 32); ARG_CHECK(pubkey != NULL); - if (!rustsecp256k1_v0_9_2_xonly_pubkey_load(ctx, &pk, pubkey)) { + if (!rustsecp256k1_v0_10_0_xonly_pubkey_load(ctx, &pk, pubkey)) { return 0; } - rustsecp256k1_v0_9_2_fe_get_b32(output32, &pk.x); + rustsecp256k1_v0_10_0_fe_get_b32(output32, &pk.x); return 1; } -int rustsecp256k1_v0_9_2_xonly_pubkey_cmp(const rustsecp256k1_v0_9_2_context* ctx, const rustsecp256k1_v0_9_2_xonly_pubkey* pk0, const rustsecp256k1_v0_9_2_xonly_pubkey* pk1) { +int rustsecp256k1_v0_10_0_xonly_pubkey_cmp(const rustsecp256k1_v0_10_0_context* ctx, const rustsecp256k1_v0_10_0_xonly_pubkey* pk0, const rustsecp256k1_v0_10_0_xonly_pubkey* pk1) { unsigned char out[2][32]; - const rustsecp256k1_v0_9_2_xonly_pubkey* pk[2]; + const rustsecp256k1_v0_10_0_xonly_pubkey* pk[2]; int i; VERIFY_CHECK(ctx != NULL); @@ -71,7 +71,7 @@ int rustsecp256k1_v0_9_2_xonly_pubkey_cmp(const rustsecp256k1_v0_9_2_context* ct * pubkeys are involved and prevents edge cases such as sorting * algorithms that use this function and do not terminate as a * result. */ - if (!rustsecp256k1_v0_9_2_xonly_pubkey_serialize(ctx, out[i], pk[i])) { + if (!rustsecp256k1_v0_10_0_xonly_pubkey_serialize(ctx, out[i], pk[i])) { /* Note that xonly_pubkey_serialize should already set the output to * zero in that case, but it's not guaranteed by the API, we can't * test it and writing a VERIFY_CHECK is more complex than @@ -79,44 +79,44 @@ int rustsecp256k1_v0_9_2_xonly_pubkey_cmp(const rustsecp256k1_v0_9_2_context* ct memset(out[i], 0, sizeof(out[i])); } } - return rustsecp256k1_v0_9_2_memcmp_var(out[0], out[1], sizeof(out[1])); + return rustsecp256k1_v0_10_0_memcmp_var(out[0], out[1], sizeof(out[1])); } /** Keeps a group element as is if it has an even Y and otherwise negates it. * y_parity is set to 0 in the former case and to 1 in the latter case. * Requires that the coordinates of r are normalized. 
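The parse path above enforces three failure modes: an x value outside the field range (`fe_set_b32_limit`), an x not on the curve (`ge_set_xo_var`), and a wrong-subgroup point (relevant to exhaustive-test builds; trivial on the real curve). The lift always selects the point with even Y. A roundtrip sketch against the public API (upstream names):

    #include <stdio.h>
    #include <string.h>
    #include <secp256k1.h>
    #include <secp256k1_extrakeys.h>

    int main(void) {
        secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
        /* x coordinate of the generator G, a known-valid x-only key. */
        unsigned char in32[32] = {
            0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, 0xac,
            0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, 0x0b, 0x07,
            0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9,
            0x59, 0xf2, 0x81, 0x5b, 0x16, 0xf8, 0x17, 0x98
        };
        unsigned char out32[32];
        secp256k1_xonly_pubkey pk;

        if (!secp256k1_xonly_pubkey_parse(ctx, &pk, in32)) return 1;
        if (!secp256k1_xonly_pubkey_serialize(ctx, out32, &pk)) return 1;
        printf("%d\n", memcmp(in32, out32, 32) == 0); /* prints 1 */
        secp256k1_context_destroy(ctx);
        return 0;
    }
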
*/ -static int rustsecp256k1_v0_9_2_extrakeys_ge_even_y(rustsecp256k1_v0_9_2_ge *r) { +static int rustsecp256k1_v0_10_0_extrakeys_ge_even_y(rustsecp256k1_v0_10_0_ge *r) { int y_parity = 0; - VERIFY_CHECK(!rustsecp256k1_v0_9_2_ge_is_infinity(r)); + VERIFY_CHECK(!rustsecp256k1_v0_10_0_ge_is_infinity(r)); - if (rustsecp256k1_v0_9_2_fe_is_odd(&r->y)) { - rustsecp256k1_v0_9_2_fe_negate(&r->y, &r->y, 1); + if (rustsecp256k1_v0_10_0_fe_is_odd(&r->y)) { + rustsecp256k1_v0_10_0_fe_negate(&r->y, &r->y, 1); y_parity = 1; } return y_parity; } -int rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_xonly_pubkey *xonly_pubkey, int *pk_parity, const rustsecp256k1_v0_9_2_pubkey *pubkey) { - rustsecp256k1_v0_9_2_ge pk; +int rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_xonly_pubkey *xonly_pubkey, int *pk_parity, const rustsecp256k1_v0_10_0_pubkey *pubkey) { + rustsecp256k1_v0_10_0_ge pk; int tmp; VERIFY_CHECK(ctx != NULL); ARG_CHECK(xonly_pubkey != NULL); ARG_CHECK(pubkey != NULL); - if (!rustsecp256k1_v0_9_2_pubkey_load(ctx, &pk, pubkey)) { + if (!rustsecp256k1_v0_10_0_pubkey_load(ctx, &pk, pubkey)) { return 0; } - tmp = rustsecp256k1_v0_9_2_extrakeys_ge_even_y(&pk); + tmp = rustsecp256k1_v0_10_0_extrakeys_ge_even_y(&pk); if (pk_parity != NULL) { *pk_parity = tmp; } - rustsecp256k1_v0_9_2_xonly_pubkey_save(xonly_pubkey, &pk); + rustsecp256k1_v0_10_0_xonly_pubkey_save(xonly_pubkey, &pk); return 1; } -int rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_pubkey *output_pubkey, const rustsecp256k1_v0_9_2_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_9_2_ge pk; +int rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_pubkey *output_pubkey, const rustsecp256k1_v0_10_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_10_0_ge pk; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output_pubkey != NULL); @@ -124,16 +124,16 @@ int rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(const rustsecp256k1_v0_9_2_conte ARG_CHECK(internal_pubkey != NULL); ARG_CHECK(tweak32 != NULL); - if (!rustsecp256k1_v0_9_2_xonly_pubkey_load(ctx, &pk, internal_pubkey) - || !rustsecp256k1_v0_9_2_ec_pubkey_tweak_add_helper(&pk, tweak32)) { + if (!rustsecp256k1_v0_10_0_xonly_pubkey_load(ctx, &pk, internal_pubkey) + || !rustsecp256k1_v0_10_0_ec_pubkey_tweak_add_helper(&pk, tweak32)) { return 0; } - rustsecp256k1_v0_9_2_pubkey_save(output_pubkey, &pk); + rustsecp256k1_v0_10_0_pubkey_save(output_pubkey, &pk); return 1; } -int rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(const rustsecp256k1_v0_9_2_context* ctx, const unsigned char *tweaked_pubkey32, int tweaked_pk_parity, const rustsecp256k1_v0_9_2_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_9_2_ge pk; +int rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(const rustsecp256k1_v0_10_0_context* ctx, const unsigned char *tweaked_pubkey32, int tweaked_pk_parity, const rustsecp256k1_v0_10_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_10_0_ge pk; unsigned char pk_expected32[32]; VERIFY_CHECK(ctx != NULL); @@ -141,31 +141,31 @@ int rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(const rustsecp256k1_v0_9_2 ARG_CHECK(tweaked_pubkey32 != NULL); ARG_CHECK(tweak32 != NULL); - if (!rustsecp256k1_v0_9_2_xonly_pubkey_load(ctx, &pk, 
internal_pubkey) - || !rustsecp256k1_v0_9_2_ec_pubkey_tweak_add_helper(&pk, tweak32)) { + if (!rustsecp256k1_v0_10_0_xonly_pubkey_load(ctx, &pk, internal_pubkey) + || !rustsecp256k1_v0_10_0_ec_pubkey_tweak_add_helper(&pk, tweak32)) { return 0; } - rustsecp256k1_v0_9_2_fe_normalize_var(&pk.x); - rustsecp256k1_v0_9_2_fe_normalize_var(&pk.y); - rustsecp256k1_v0_9_2_fe_get_b32(pk_expected32, &pk.x); + rustsecp256k1_v0_10_0_fe_normalize_var(&pk.x); + rustsecp256k1_v0_10_0_fe_normalize_var(&pk.y); + rustsecp256k1_v0_10_0_fe_get_b32(pk_expected32, &pk.x); - return rustsecp256k1_v0_9_2_memcmp_var(&pk_expected32, tweaked_pubkey32, 32) == 0 - && rustsecp256k1_v0_9_2_fe_is_odd(&pk.y) == tweaked_pk_parity; + return rustsecp256k1_v0_10_0_memcmp_var(&pk_expected32, tweaked_pubkey32, 32) == 0 + && rustsecp256k1_v0_10_0_fe_is_odd(&pk.y) == tweaked_pk_parity; } -static void rustsecp256k1_v0_9_2_keypair_save(rustsecp256k1_v0_9_2_keypair *keypair, const rustsecp256k1_v0_9_2_scalar *sk, rustsecp256k1_v0_9_2_ge *pk) { - rustsecp256k1_v0_9_2_scalar_get_b32(&keypair->data[0], sk); - rustsecp256k1_v0_9_2_pubkey_save((rustsecp256k1_v0_9_2_pubkey *)&keypair->data[32], pk); +static void rustsecp256k1_v0_10_0_keypair_save(rustsecp256k1_v0_10_0_keypair *keypair, const rustsecp256k1_v0_10_0_scalar *sk, rustsecp256k1_v0_10_0_ge *pk) { + rustsecp256k1_v0_10_0_scalar_get_b32(&keypair->data[0], sk); + rustsecp256k1_v0_10_0_pubkey_save((rustsecp256k1_v0_10_0_pubkey *)&keypair->data[32], pk); } -static int rustsecp256k1_v0_9_2_keypair_seckey_load(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_scalar *sk, const rustsecp256k1_v0_9_2_keypair *keypair) { +static int rustsecp256k1_v0_10_0_keypair_seckey_load(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_scalar *sk, const rustsecp256k1_v0_10_0_keypair *keypair) { int ret; - ret = rustsecp256k1_v0_9_2_scalar_set_b32_seckey(sk, &keypair->data[0]); + ret = rustsecp256k1_v0_10_0_scalar_set_b32_seckey(sk, &keypair->data[0]); /* We can declassify ret here because sk is only zero if a keypair function * failed (which zeroes the keypair) and its return value is ignored. */ - rustsecp256k1_v0_9_2_declassify(ctx, &ret, sizeof(ret)); + rustsecp256k1_v0_10_0_declassify(ctx, &ret, sizeof(ret)); ARG_CHECK(ret); return ret; } @@ -173,45 +173,45 @@ static int rustsecp256k1_v0_9_2_keypair_seckey_load(const rustsecp256k1_v0_9_2_c /* Load a keypair into pk and sk (if non-NULL). This function declassifies pk * and ARG_CHECKs that the keypair is not invalid. It always initializes sk and * pk with dummy values. */ -static int rustsecp256k1_v0_9_2_keypair_load(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_scalar *sk, rustsecp256k1_v0_9_2_ge *pk, const rustsecp256k1_v0_9_2_keypair *keypair) { +static int rustsecp256k1_v0_10_0_keypair_load(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_scalar *sk, rustsecp256k1_v0_10_0_ge *pk, const rustsecp256k1_v0_10_0_keypair *keypair) { int ret; - const rustsecp256k1_v0_9_2_pubkey *pubkey = (const rustsecp256k1_v0_9_2_pubkey *)&keypair->data[32]; + const rustsecp256k1_v0_10_0_pubkey *pubkey = (const rustsecp256k1_v0_10_0_pubkey *)&keypair->data[32]; /* Need to declassify the pubkey because pubkey_load ARG_CHECKs if it's * invalid. 
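`xonly_pubkey_tweak_add` and `xonly_pubkey_tweak_add_check` above are two halves of the BIP341-style key-tweaking contract: the former computes internal + tweak*G as a full pubkey, the latter re-verifies a claimed 32-byte tweaked x and its parity against the internal key. A hedged roundtrip sketch (upstream names; `tweak_roundtrip` is an illustrative helper):

    #include <secp256k1.h>
    #include <secp256k1_extrakeys.h>

    /* Derive output_pk = internal + tweak*G, then prove the serialized
     * output x and parity back against the internal key. Returns 1 iff
     * the roundtrip is consistent; inputs are assumed valid. */
    static int tweak_roundtrip(const secp256k1_context *ctx,
                               const secp256k1_xonly_pubkey *internal_pk,
                               const unsigned char tweak32[32]) {
        secp256k1_pubkey output_pk;
        secp256k1_xonly_pubkey output_xonly;
        unsigned char out32[32];
        int parity;

        if (!secp256k1_xonly_pubkey_tweak_add(ctx, &output_pk, internal_pk, tweak32)) return 0;
        if (!secp256k1_xonly_pubkey_from_pubkey(ctx, &output_xonly, &parity, &output_pk)) return 0;
        if (!secp256k1_xonly_pubkey_serialize(ctx, out32, &output_xonly)) return 0;
        return secp256k1_xonly_pubkey_tweak_add_check(ctx, out32, parity, internal_pk, tweak32);
    }
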
*/ - rustsecp256k1_v0_9_2_declassify(ctx, pubkey, sizeof(*pubkey)); - ret = rustsecp256k1_v0_9_2_pubkey_load(ctx, pk, pubkey); + rustsecp256k1_v0_10_0_declassify(ctx, pubkey, sizeof(*pubkey)); + ret = rustsecp256k1_v0_10_0_pubkey_load(ctx, pk, pubkey); if (sk != NULL) { - ret = ret && rustsecp256k1_v0_9_2_keypair_seckey_load(ctx, sk, keypair); + ret = ret && rustsecp256k1_v0_10_0_keypair_seckey_load(ctx, sk, keypair); } if (!ret) { - *pk = rustsecp256k1_v0_9_2_ge_const_g; + *pk = rustsecp256k1_v0_10_0_ge_const_g; if (sk != NULL) { - *sk = rustsecp256k1_v0_9_2_scalar_one; + *sk = rustsecp256k1_v0_10_0_scalar_one; } } return ret; } -int rustsecp256k1_v0_9_2_keypair_create(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_keypair *keypair, const unsigned char *seckey32) { - rustsecp256k1_v0_9_2_scalar sk; - rustsecp256k1_v0_9_2_ge pk; +int rustsecp256k1_v0_10_0_keypair_create(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_keypair *keypair, const unsigned char *seckey32) { + rustsecp256k1_v0_10_0_scalar sk; + rustsecp256k1_v0_10_0_ge pk; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(keypair != NULL); memset(keypair, 0, sizeof(*keypair)); - ARG_CHECK(rustsecp256k1_v0_9_2_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_10_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(seckey32 != NULL); - ret = rustsecp256k1_v0_9_2_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &sk, &pk, seckey32); - rustsecp256k1_v0_9_2_keypair_save(keypair, &sk, &pk); - rustsecp256k1_v0_9_2_memczero(keypair, sizeof(*keypair), !ret); + ret = rustsecp256k1_v0_10_0_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &sk, &pk, seckey32); + rustsecp256k1_v0_10_0_keypair_save(keypair, &sk, &pk); + rustsecp256k1_v0_10_0_memczero(keypair, sizeof(*keypair), !ret); - rustsecp256k1_v0_9_2_scalar_clear(&sk); + rustsecp256k1_v0_10_0_scalar_clear(&sk); return ret; } -int rustsecp256k1_v0_9_2_keypair_sec(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *seckey, const rustsecp256k1_v0_9_2_keypair *keypair) { +int rustsecp256k1_v0_10_0_keypair_sec(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *seckey, const rustsecp256k1_v0_10_0_keypair *keypair) { VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); memset(seckey, 0, 32); @@ -221,7 +221,7 @@ int rustsecp256k1_v0_9_2_keypair_sec(const rustsecp256k1_v0_9_2_context* ctx, un return 1; } -int rustsecp256k1_v0_9_2_keypair_pub(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_pubkey *pubkey, const rustsecp256k1_v0_9_2_keypair *keypair) { +int rustsecp256k1_v0_10_0_keypair_pub(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_pubkey *pubkey, const rustsecp256k1_v0_10_0_keypair *keypair) { VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); @@ -231,8 +231,8 @@ int rustsecp256k1_v0_9_2_keypair_pub(const rustsecp256k1_v0_9_2_context* ctx, ru return 1; } -int rustsecp256k1_v0_9_2_keypair_xonly_pub(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_xonly_pubkey *pubkey, int *pk_parity, const rustsecp256k1_v0_9_2_keypair *keypair) { - rustsecp256k1_v0_9_2_ge pk; +int rustsecp256k1_v0_10_0_keypair_xonly_pub(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_xonly_pubkey *pubkey, int *pk_parity, const rustsecp256k1_v0_10_0_keypair *keypair) { + rustsecp256k1_v0_10_0_ge pk; int tmp; VERIFY_CHECK(ctx != NULL); @@ -240,21 +240,21 @@ int rustsecp256k1_v0_9_2_keypair_xonly_pub(const rustsecp256k1_v0_9_2_context* c memset(pubkey, 0, 
sizeof(*pubkey)); ARG_CHECK(keypair != NULL); - if (!rustsecp256k1_v0_9_2_keypair_load(ctx, NULL, &pk, keypair)) { + if (!rustsecp256k1_v0_10_0_keypair_load(ctx, NULL, &pk, keypair)) { return 0; } - tmp = rustsecp256k1_v0_9_2_extrakeys_ge_even_y(&pk); + tmp = rustsecp256k1_v0_10_0_extrakeys_ge_even_y(&pk); if (pk_parity != NULL) { *pk_parity = tmp; } - rustsecp256k1_v0_9_2_xonly_pubkey_save(pubkey, &pk); + rustsecp256k1_v0_10_0_xonly_pubkey_save(pubkey, &pk); return 1; } -int rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_keypair *keypair, const unsigned char *tweak32) { - rustsecp256k1_v0_9_2_ge pk; - rustsecp256k1_v0_9_2_scalar sk; +int rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_keypair *keypair, const unsigned char *tweak32) { + rustsecp256k1_v0_10_0_ge pk; + rustsecp256k1_v0_10_0_scalar sk; int y_parity; int ret; @@ -262,23 +262,23 @@ int rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(const rustsecp256k1_v0_9_2_cont ARG_CHECK(keypair != NULL); ARG_CHECK(tweak32 != NULL); - ret = rustsecp256k1_v0_9_2_keypair_load(ctx, &sk, &pk, keypair); + ret = rustsecp256k1_v0_10_0_keypair_load(ctx, &sk, &pk, keypair); memset(keypair, 0, sizeof(*keypair)); - y_parity = rustsecp256k1_v0_9_2_extrakeys_ge_even_y(&pk); + y_parity = rustsecp256k1_v0_10_0_extrakeys_ge_even_y(&pk); if (y_parity == 1) { - rustsecp256k1_v0_9_2_scalar_negate(&sk, &sk); + rustsecp256k1_v0_10_0_scalar_negate(&sk, &sk); } - ret &= rustsecp256k1_v0_9_2_ec_seckey_tweak_add_helper(&sk, tweak32); - ret &= rustsecp256k1_v0_9_2_ec_pubkey_tweak_add_helper(&pk, tweak32); + ret &= rustsecp256k1_v0_10_0_ec_seckey_tweak_add_helper(&sk, tweak32); + ret &= rustsecp256k1_v0_10_0_ec_pubkey_tweak_add_helper(&pk, tweak32); - rustsecp256k1_v0_9_2_declassify(ctx, &ret, sizeof(ret)); + rustsecp256k1_v0_10_0_declassify(ctx, &ret, sizeof(ret)); if (ret) { - rustsecp256k1_v0_9_2_keypair_save(keypair, &sk, &pk); + rustsecp256k1_v0_10_0_keypair_save(keypair, &sk, &pk); } - rustsecp256k1_v0_9_2_scalar_clear(&sk); + rustsecp256k1_v0_10_0_scalar_clear(&sk); return ret; } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h index 1f005b0dc..1612cd521 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h @@ -10,54 +10,54 @@ #include "../../../include/secp256k1_extrakeys.h" #include "main_impl.h" -static void test_exhaustive_extrakeys(const rustsecp256k1_v0_9_2_context *ctx, const rustsecp256k1_v0_9_2_ge* group) { - rustsecp256k1_v0_9_2_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; - rustsecp256k1_v0_9_2_pubkey pubkey[EXHAUSTIVE_TEST_ORDER - 1]; - rustsecp256k1_v0_9_2_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1]; +static void test_exhaustive_extrakeys(const rustsecp256k1_v0_10_0_context *ctx, const rustsecp256k1_v0_10_0_ge* group) { + rustsecp256k1_v0_10_0_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; + rustsecp256k1_v0_10_0_pubkey pubkey[EXHAUSTIVE_TEST_ORDER - 1]; + rustsecp256k1_v0_10_0_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1]; int parities[EXHAUSTIVE_TEST_ORDER - 1]; unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32]; int i; for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_9_2_fe fe; - rustsecp256k1_v0_9_2_scalar scalar_i; + rustsecp256k1_v0_10_0_fe fe; + 
rustsecp256k1_v0_10_0_scalar scalar_i; unsigned char buf[33]; int parity; - rustsecp256k1_v0_9_2_scalar_set_int(&scalar_i, i); - rustsecp256k1_v0_9_2_scalar_get_b32(buf, &scalar_i); + rustsecp256k1_v0_10_0_scalar_set_int(&scalar_i, i); + rustsecp256k1_v0_10_0_scalar_get_b32(buf, &scalar_i); /* Construct pubkey and keypair. */ - CHECK(rustsecp256k1_v0_9_2_keypair_create(ctx, &keypair[i - 1], buf)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(ctx, &pubkey[i - 1], buf)); + CHECK(rustsecp256k1_v0_10_0_keypair_create(ctx, &keypair[i - 1], buf)); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(ctx, &pubkey[i - 1], buf)); /* Construct serialized xonly_pubkey from keypair. */ - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parities[i - 1], &keypair[i - 1])); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1])); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parities[i - 1], &keypair[i - 1])); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1])); /* Parse the xonly_pubkey back and verify it matches the previously serialized value. */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(ctx, &xonly_pubkey[i - 1], xonly_pubkey_bytes[i - 1])); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1])); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_parse(ctx, &xonly_pubkey[i - 1], xonly_pubkey_bytes[i - 1])); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1])); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0); /* Construct the xonly_pubkey from the pubkey, and verify it matches the same. */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(ctx, &xonly_pubkey[i - 1], &parity, &pubkey[i - 1])); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(ctx, &xonly_pubkey[i - 1], &parity, &pubkey[i - 1])); CHECK(parity == parities[i - 1]); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1])); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1])); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0); /* Compare the xonly_pubkey bytes against the precomputed group. */ - rustsecp256k1_v0_9_2_fe_set_b32_mod(&fe, xonly_pubkey_bytes[i - 1]); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&fe, &group[i].x)); + rustsecp256k1_v0_10_0_fe_set_b32_mod(&fe, xonly_pubkey_bytes[i - 1]); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&fe, &group[i].x)); /* Check the parity against the precomputed group. */ fe = group[i].y; - rustsecp256k1_v0_9_2_fe_normalize_var(&fe); - CHECK(rustsecp256k1_v0_9_2_fe_is_odd(&fe) == parities[i - 1]); + rustsecp256k1_v0_10_0_fe_normalize_var(&fe); + CHECK(rustsecp256k1_v0_10_0_fe_is_odd(&fe) == parities[i - 1]); /* Verify that the higher half is identical to the lower half mirrored. 
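A one-line derivation of the mirror check that follows, with N = EXHAUSTIVE_TEST_ORDER (the group order) and O the point at infinity:

    (N - i)*G = N*G - i*G = O - i*G = -(i*G)

Point negation flips only the sign of y, so i*G and (N - i)*G share an x coordinate, hence identical x-only serializations, while their y parities are opposite; that is exactly what the two CHECKs assert.
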
*/ if (i > EXHAUSTIVE_TEST_ORDER / 2) { - CHECK(rustsecp256k1_v0_9_2_memcmp_var(xonly_pubkey_bytes[i - 1], xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - i - 1], 32) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(xonly_pubkey_bytes[i - 1], xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - i - 1], 32) == 0); CHECK(parities[i - 1] == 1 - parities[EXHAUSTIVE_TEST_ORDER - i - 1]); } } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h index 76b519b97..e5143dec2 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h @@ -9,17 +9,12 @@ #include "../../../include/secp256k1_extrakeys.h" -static void set_counting_callbacks(rustsecp256k1_v0_9_2_context *ctx0, int *ecount) { - rustsecp256k1_v0_9_2_context_set_error_callback(ctx0, counting_illegal_callback_fn, ecount); - rustsecp256k1_v0_9_2_context_set_illegal_callback(ctx0, counting_illegal_callback_fn, ecount); -} - static void test_xonly_pubkey(void) { - rustsecp256k1_v0_9_2_pubkey pk; - rustsecp256k1_v0_9_2_xonly_pubkey xonly_pk, xonly_pk_tmp; - rustsecp256k1_v0_9_2_ge pk1; - rustsecp256k1_v0_9_2_ge pk2; - rustsecp256k1_v0_9_2_fe y; + rustsecp256k1_v0_10_0_pubkey pk; + rustsecp256k1_v0_10_0_xonly_pubkey xonly_pk, xonly_pk_tmp; + rustsecp256k1_v0_10_0_ge pk1; + rustsecp256k1_v0_10_0_ge pk2; + rustsecp256k1_v0_10_0_fe y; unsigned char sk[32]; unsigned char xy_sk[32]; unsigned char buf32[32]; @@ -28,104 +23,88 @@ static void test_xonly_pubkey(void) { int pk_parity; int i; - int ecount; - - set_counting_callbacks(CTX, &ecount); - - rustsecp256k1_v0_9_2_testrand256(sk); + rustsecp256k1_v0_10_0_testrand256(sk); memset(ones32, 0xFF, 32); - rustsecp256k1_v0_9_2_testrand256(xy_sk); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 1); + rustsecp256k1_v0_10_0_testrand256(xy_sk); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 1); /* Test xonly_pubkey_from_pubkey */ - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, NULL, &pk_parity, &pk) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &xonly_pk, NULL, &pk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, NULL) == 0); - CHECK(ecount == 2); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, NULL, &pk_parity, &pk)); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &xonly_pk, NULL, &pk) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, NULL)); memset(&pk, 0, sizeof(pk)); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 0); - CHECK(ecount == 3); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk)); /* Choose a secret key such that the resulting pubkey and xonly_pubkey match. 
*/ memset(sk, 0, sizeof(sk)); sk[0] = 1; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pk, &xonly_pk, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pk, &xonly_pk, sizeof(pk)) == 0); CHECK(pk_parity == 0); /* Choose a secret key such that pubkey and xonly_pubkey are each others * negation. */ sk[0] = 2; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&xonly_pk, &pk, sizeof(xonly_pk)) != 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&xonly_pk, &pk, sizeof(xonly_pk)) != 0); CHECK(pk_parity == 1); - rustsecp256k1_v0_9_2_pubkey_load(CTX, &pk1, &pk); - rustsecp256k1_v0_9_2_pubkey_load(CTX, &pk2, (rustsecp256k1_v0_9_2_pubkey *) &xonly_pk); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&pk1.x, &pk2.x) == 1); - rustsecp256k1_v0_9_2_fe_negate(&y, &pk2.y, 1); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&pk1.y, &y) == 1); + rustsecp256k1_v0_10_0_pubkey_load(CTX, &pk1, &pk); + rustsecp256k1_v0_10_0_pubkey_load(CTX, &pk2, (rustsecp256k1_v0_10_0_pubkey *) &xonly_pk); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&pk1.x, &pk2.x) == 1); + rustsecp256k1_v0_10_0_fe_negate(&y, &pk2.y, 1); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&pk1.y, &y) == 1); /* Test xonly_pubkey_serialize and xonly_pubkey_parse */ - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(CTX, NULL, &xonly_pk) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(CTX, buf32, NULL) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(buf32, zeros64, 32) == 0); - CHECK(ecount == 2); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_serialize(CTX, NULL, &xonly_pk)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_serialize(CTX, buf32, NULL)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(buf32, zeros64, 32) == 0); { /* A pubkey filled with 0s will fail to serialize due to pubkey_load * special casing. 
*/ - rustsecp256k1_v0_9_2_xonly_pubkey pk_tmp; + rustsecp256k1_v0_10_0_xonly_pubkey pk_tmp; memset(&pk_tmp, 0, sizeof(pk_tmp)); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(CTX, buf32, &pk_tmp) == 0); + /* pubkey_load calls illegal callback */ + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_serialize(CTX, buf32, &pk_tmp)); } - /* pubkey_load called illegal callback */ - CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(CTX, buf32, &xonly_pk) == 1); - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, NULL, buf32) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, &xonly_pk, NULL) == 0); - CHECK(ecount == 2); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(CTX, buf32, &xonly_pk) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, NULL, buf32)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, &xonly_pk, NULL)); /* Serialization and parse roundtrip */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &xonly_pk, NULL, &pk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(CTX, buf32, &xonly_pk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, &xonly_pk_tmp, buf32) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &xonly_pk, NULL, &pk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(CTX, buf32, &xonly_pk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, &xonly_pk_tmp, buf32) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0); /* Test parsing invalid field elements */ memset(&xonly_pk, 1, sizeof(xonly_pk)); /* Overflowing field element */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, &xonly_pk, ones32) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, &xonly_pk, ones32) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); memset(&xonly_pk, 1, sizeof(xonly_pk)); /* There's no point with x-coordinate 0 on secp256k1 */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, &xonly_pk, zeros64) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, &xonly_pk, zeros64) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); /* If a random 32-byte string can not be parsed with ec_pubkey_parse * (because interpreted as X coordinate it does not correspond to a point on * the curve) then xonly_pubkey_parse should fail as well. 
*/ for (i = 0; i < COUNT; i++) { unsigned char rand33[33]; - rustsecp256k1_v0_9_2_testrand256(&rand33[1]); + rustsecp256k1_v0_10_0_testrand256(&rand33[1]); rand33[0] = SECP256K1_TAG_PUBKEY_EVEN; - if (!rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pk, rand33, 33)) { + if (!rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pk, rand33, 33)) { memset(&xonly_pk, 1, sizeof(xonly_pk)); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, &xonly_pk, &rand33[1]) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, &xonly_pk, &rand33[1]) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); } else { - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, &xonly_pk, &rand33[1]) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, &xonly_pk, &rand33[1]) == 1); } } - CHECK(ecount == 2); } static void test_xonly_pubkey_comparison(void) { @@ -137,160 +116,133 @@ static void test_xonly_pubkey_comparison(void) { 0xde, 0x36, 0x0e, 0x87, 0x59, 0x8f, 0x3c, 0x01, 0x36, 0x2a, 0x2a, 0xb8, 0xc6, 0xf4, 0x5e, 0x4d, 0xb2, 0xc2, 0xd5, 0x03, 0xa7, 0xf9, 0xf1, 0x4f, 0xa8, 0xfa, 0x95, 0xa8, 0xe9, 0x69, 0x76, 0x1c }; - rustsecp256k1_v0_9_2_xonly_pubkey pk1; - rustsecp256k1_v0_9_2_xonly_pubkey pk2; - int ecount = 0; - - set_counting_callbacks(CTX, &ecount); - - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, &pk1, pk1_ser) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, &pk2, pk2_ser) == 1); - - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_cmp(CTX, NULL, &pk2) < 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_cmp(CTX, &pk1, NULL) > 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_cmp(CTX, &pk1, &pk2) < 0); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_cmp(CTX, &pk2, &pk1) > 0); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_cmp(CTX, &pk1, &pk1) == 0); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_cmp(CTX, &pk2, &pk2) == 0); - CHECK(ecount == 2); + rustsecp256k1_v0_10_0_xonly_pubkey pk1; + rustsecp256k1_v0_10_0_xonly_pubkey pk2; + + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, &pk1, pk1_ser) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, &pk2, pk2_ser) == 1); + + CHECK_ILLEGAL_VOID(CTX, CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_cmp(CTX, NULL, &pk2) < 0)); + CHECK_ILLEGAL_VOID(CTX, CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_cmp(CTX, &pk1, NULL) > 0)); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_cmp(CTX, &pk1, &pk2) < 0); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_cmp(CTX, &pk2, &pk1) > 0); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_cmp(CTX, &pk1, &pk1) == 0); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_cmp(CTX, &pk2, &pk2) == 0); memset(&pk1, 0, sizeof(pk1)); /* illegal pubkey */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_cmp(CTX, &pk1, &pk2) < 0); - CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_cmp(CTX, &pk1, &pk1) == 0); - CHECK(ecount == 5); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_cmp(CTX, &pk2, &pk1) > 0); - CHECK(ecount == 6); + CHECK_ILLEGAL_VOID(CTX, CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_cmp(CTX, &pk1, &pk2) < 0)); + { + int32_t ecount = 0; + rustsecp256k1_v0_10_0_context_set_illegal_callback(CTX, counting_callback_fn, &ecount); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_cmp(CTX, &pk1, &pk1) == 0); + CHECK(ecount == 2); + rustsecp256k1_v0_10_0_context_set_illegal_callback(CTX, NULL, NULL); + } + CHECK_ILLEGAL_VOID(CTX, CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_cmp(CTX, &pk2, &pk1) > 0)); } static void 
test_xonly_pubkey_tweak(void) { unsigned char zeros64[64] = { 0 }; unsigned char overflows[32]; unsigned char sk[32]; - rustsecp256k1_v0_9_2_pubkey internal_pk; - rustsecp256k1_v0_9_2_xonly_pubkey internal_xonly_pk; - rustsecp256k1_v0_9_2_pubkey output_pk; + rustsecp256k1_v0_10_0_pubkey internal_pk; + rustsecp256k1_v0_10_0_xonly_pubkey internal_xonly_pk; + rustsecp256k1_v0_10_0_pubkey output_pk; int pk_parity; unsigned char tweak[32]; int i; - int ecount; - - set_counting_callbacks(CTX, &ecount); - memset(overflows, 0xff, sizeof(overflows)); - rustsecp256k1_v0_9_2_testrand256(tweak); - rustsecp256k1_v0_9_2_testrand256(sk); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &internal_pk, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &internal_xonly_pk, &pk_parity, &internal_pk) == 1); - - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, NULL, &internal_xonly_pk, tweak) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &output_pk, NULL, tweak) == 0); - CHECK(ecount == 2); + rustsecp256k1_v0_10_0_testrand256(tweak); + rustsecp256k1_v0_10_0_testrand256(sk); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &internal_pk, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &internal_xonly_pk, &pk_parity, &internal_pk) == 1); + + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, NULL, &internal_xonly_pk, tweak)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &output_pk, NULL, tweak)); /* NULL internal_xonly_pk zeroes the output_pk */ - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, NULL) == 0); - CHECK(ecount == 3); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, NULL)); /* NULL tweak zeroes the output_pk */ - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); /* Invalid tweak zeroes the output_pk */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, overflows) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, overflows) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); /* A zero tweak is fine */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, zeros64) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, zeros64) == 1); 
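Throughout these test rewrites, manual `ecount` bookkeeping is replaced by `CHECK_ILLEGAL`/`CHECK_ILLEGAL_VOID`. The real macros live in the updated test harness; a plausible equivalent of the non-void form, to make the intended semantics concrete (a sketch assuming the harness's `CHECK` macro and the `counting_callback_fn` seen in these tests):

    #include <stdint.h>

    /* Sketch: assert that expr returns 0 and trips the context's
     * illegal-argument callback exactly once, then restore the default. */
    #define CHECK_ILLEGAL_SKETCH(ctx, expr) do { \
        int32_t calls_ = 0; \
        secp256k1_context_set_illegal_callback((ctx), counting_callback_fn, &calls_); \
        CHECK((expr) == 0); \
        CHECK(calls_ == 1); \
        secp256k1_context_set_illegal_callback((ctx), NULL, NULL); \
    } while (0)

The explicit `CHECK(ecount == 2)` block retained in the comparison test above shows why raw counting still survives in one spot: `xonly_pubkey_cmp` on two invalid keys fires the callback once per argument, which a fires-exactly-once macro cannot express.
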
/* Fails if the resulting key was infinity */ for (i = 0; i < COUNT; i++) { - rustsecp256k1_v0_9_2_scalar scalar_tweak; + rustsecp256k1_v0_10_0_scalar scalar_tweak; /* Because sk may be negated before adding, we need to try with tweak = * sk as well as tweak = -sk. */ - rustsecp256k1_v0_9_2_scalar_set_b32(&scalar_tweak, sk, NULL); - rustsecp256k1_v0_9_2_scalar_negate(&scalar_tweak, &scalar_tweak); - rustsecp256k1_v0_9_2_scalar_get_b32(tweak, &scalar_tweak); - CHECK((rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, sk) == 0) - || (rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 0)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + rustsecp256k1_v0_10_0_scalar_set_b32(&scalar_tweak, sk, NULL); + rustsecp256k1_v0_10_0_scalar_negate(&scalar_tweak, &scalar_tweak); + rustsecp256k1_v0_10_0_scalar_get_b32(tweak, &scalar_tweak); + CHECK((rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, sk) == 0) + || (rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 0)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); } /* Invalid pk with a valid tweak */ memset(&internal_xonly_pk, 0, sizeof(internal_xonly_pk)); - rustsecp256k1_v0_9_2_testrand256(tweak); - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + rustsecp256k1_v0_10_0_testrand256(tweak); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); } static void test_xonly_pubkey_tweak_check(void) { unsigned char zeros64[64] = { 0 }; unsigned char overflows[32]; unsigned char sk[32]; - rustsecp256k1_v0_9_2_pubkey internal_pk; - rustsecp256k1_v0_9_2_xonly_pubkey internal_xonly_pk; - rustsecp256k1_v0_9_2_pubkey output_pk; - rustsecp256k1_v0_9_2_xonly_pubkey output_xonly_pk; + rustsecp256k1_v0_10_0_pubkey internal_pk; + rustsecp256k1_v0_10_0_xonly_pubkey internal_xonly_pk; + rustsecp256k1_v0_10_0_pubkey output_pk; + rustsecp256k1_v0_10_0_xonly_pubkey output_xonly_pk; unsigned char output_pk32[32]; unsigned char buf32[32]; int pk_parity; unsigned char tweak[32]; - int ecount; - - set_counting_callbacks(CTX, &ecount); - memset(overflows, 0xff, sizeof(overflows)); - rustsecp256k1_v0_9_2_testrand256(tweak); - rustsecp256k1_v0_9_2_testrand256(sk); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &internal_pk, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &internal_xonly_pk, &pk_parity, &internal_pk) == 1); - - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &output_xonly_pk, &pk_parity, &output_pk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(CTX, buf32, &output_xonly_pk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, tweak) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, tweak) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, tweak) == 1); - 
CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(CTX, NULL, pk_parity, &internal_xonly_pk, tweak) == 0); - CHECK(ecount == 1); + rustsecp256k1_v0_10_0_testrand256(tweak); + rustsecp256k1_v0_10_0_testrand256(sk); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &internal_pk, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &internal_xonly_pk, &pk_parity, &internal_pk) == 1); + + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &output_xonly_pk, &pk_parity, &output_pk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(CTX, buf32, &output_xonly_pk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, tweak) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(CTX, NULL, pk_parity, &internal_xonly_pk, tweak)); /* invalid pk_parity value */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(CTX, buf32, 2, &internal_xonly_pk, tweak) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, NULL, tweak) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, NULL) == 0); - CHECK(ecount == 3); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(CTX, buf32, 2, &internal_xonly_pk, tweak) == 0); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, NULL, tweak)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, NULL)); memset(tweak, 1, sizeof(tweak)); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &internal_xonly_pk, NULL, &internal_pk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &output_xonly_pk, &pk_parity, &output_pk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(CTX, output_pk32, &output_xonly_pk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(CTX, output_pk32, pk_parity, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &internal_xonly_pk, NULL, &internal_pk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &output_xonly_pk, &pk_parity, &output_pk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(CTX, output_pk32, &output_xonly_pk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(CTX, output_pk32, pk_parity, &internal_xonly_pk, tweak) == 1); /* Wrong pk_parity */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(CTX, output_pk32, !pk_parity, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(CTX, output_pk32, !pk_parity, &internal_xonly_pk, tweak) == 0); /* Wrong public key */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(CTX, buf32, &internal_xonly_pk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, tweak) == 0); + 
CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(CTX, buf32, &internal_xonly_pk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, tweak) == 0); /* Overflowing tweak not allowed */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(CTX, output_pk32, pk_parity, &internal_xonly_pk, overflows) == 0); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, overflows) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); - CHECK(ecount == 3); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(CTX, output_pk32, pk_parity, &internal_xonly_pk, overflows) == 0); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, overflows) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); } /* Starts with an initial pubkey and recursively creates N_PUBKEYS - 1 @@ -299,29 +251,29 @@ static void test_xonly_pubkey_tweak_check(void) { #define N_PUBKEYS 32 static void test_xonly_pubkey_tweak_recursive(void) { unsigned char sk[32]; - rustsecp256k1_v0_9_2_pubkey pk[N_PUBKEYS]; + rustsecp256k1_v0_10_0_pubkey pk[N_PUBKEYS]; unsigned char pk_serialized[32]; unsigned char tweak[N_PUBKEYS - 1][32]; int i; - rustsecp256k1_v0_9_2_testrand256(sk); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pk[0], sk) == 1); + rustsecp256k1_v0_10_0_testrand256(sk); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pk[0], sk) == 1); /* Add tweaks */ for (i = 0; i < N_PUBKEYS - 1; i++) { - rustsecp256k1_v0_9_2_xonly_pubkey xonly_pk; + rustsecp256k1_v0_10_0_xonly_pubkey xonly_pk; memset(tweak[i], i + 1, sizeof(tweak[i])); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &xonly_pk, NULL, &pk[i]) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &pk[i + 1], &xonly_pk, tweak[i]) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &xonly_pk, NULL, &pk[i]) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &pk[i + 1], &xonly_pk, tweak[i]) == 1); } /* Verify tweaks */ for (i = N_PUBKEYS - 1; i > 0; i--) { - rustsecp256k1_v0_9_2_xonly_pubkey xonly_pk; + rustsecp256k1_v0_10_0_xonly_pubkey xonly_pk; int pk_parity; - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk[i]) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(CTX, pk_serialized, &xonly_pk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &xonly_pk, NULL, &pk[i - 1]) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(CTX, pk_serialized, pk_parity, &xonly_pk, tweak[i - 1]) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk[i]) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(CTX, pk_serialized, &xonly_pk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &xonly_pk, NULL, &pk[i - 1]) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(CTX, pk_serialized, pk_parity, &xonly_pk, tweak[i - 1]) == 1); } } #undef N_PUBKEYS @@ -331,222 +283,187 @@ static void test_keypair(void) { unsigned char sk_tmp[32]; unsigned char zeros96[96] = { 0 }; unsigned char overflows[32]; - rustsecp256k1_v0_9_2_keypair keypair; - rustsecp256k1_v0_9_2_pubkey pk, pk_tmp; - rustsecp256k1_v0_9_2_xonly_pubkey xonly_pk, xonly_pk_tmp; + rustsecp256k1_v0_10_0_keypair keypair; + rustsecp256k1_v0_10_0_pubkey pk, pk_tmp; + rustsecp256k1_v0_10_0_xonly_pubkey xonly_pk, xonly_pk_tmp; int 
pk_parity, pk_parity_tmp; - int ecount; - - set_counting_callbacks(CTX, &ecount); - set_counting_callbacks(STATIC_CTX, &ecount); CHECK(sizeof(zeros96) == sizeof(keypair)); memset(overflows, 0xFF, sizeof(overflows)); /* Test keypair_create */ - ecount = 0; - rustsecp256k1_v0_9_2_testrand256(sk); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros96, &keypair, sizeof(keypair)) != 0); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros96, &keypair, sizeof(keypair)) != 0); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, NULL, sk) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, NULL) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_keypair_create(STATIC_CTX, &keypair, sk) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); - CHECK(ecount == 3); + rustsecp256k1_v0_10_0_testrand256(sk); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) != 0); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) != 0); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_keypair_create(CTX, NULL, sk)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, NULL)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); + CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_10_0_keypair_create(STATIC_CTX, &keypair, sk)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); /* Invalid secret key */ - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, zeros96) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, overflows) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, zeros96) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, overflows) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); /* Test keypair_pub */ - ecount = 0; - rustsecp256k1_v0_9_2_testrand256(sk); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_pub(CTX, &pk, &keypair) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_pub(CTX, NULL, &keypair) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_pub(CTX, &pk, NULL) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros96, &pk, sizeof(pk)) == 0); + rustsecp256k1_v0_10_0_testrand256(sk); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_pub(CTX, &pk, &keypair) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_keypair_pub(CTX, NULL, &keypair)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_keypair_pub(CTX, &pk, NULL)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros96, &pk, sizeof(pk)) 
== 0); /* Using an invalid keypair is fine for keypair_pub */ memset(&keypair, 0, sizeof(keypair)); - CHECK(rustsecp256k1_v0_9_2_keypair_pub(CTX, &pk, &keypair) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros96, &pk, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_10_0_keypair_pub(CTX, &pk, &keypair) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros96, &pk, sizeof(pk)) == 0); /* keypair holds the same pubkey as pubkey_create */ - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_pub(CTX, &pk_tmp, &keypair) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pk, &pk_tmp, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_pub(CTX, &pk_tmp, &keypair) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pk, &pk_tmp, sizeof(pk)) == 0); /** Test keypair_xonly_pub **/ - ecount = 0; - rustsecp256k1_v0_9_2_testrand256(sk); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, &keypair) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, NULL, &pk_parity, &keypair) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &xonly_pk, NULL, &keypair) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, NULL) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); + rustsecp256k1_v0_10_0_testrand256(sk); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, &keypair) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, NULL, &pk_parity, &keypair)); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &xonly_pk, NULL, &keypair) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, NULL)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); /* Using an invalid keypair will set the xonly_pk to 0 (first reset * xonly_pk). 
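     * Pre-filling xonly_pk with a valid key first ensures the memcmp against
     * zeros below really observes the zeroing done by the failing call.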
*/ - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, &keypair) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, &keypair) == 1); memset(&keypair, 0, sizeof(keypair)); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, &keypair) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); - CHECK(ecount == 3); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, &keypair)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); /** keypair holds the same xonly pubkey as pubkey_create **/ - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &xonly_pk_tmp, &pk_parity_tmp, &keypair) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &xonly_pk_tmp, &pk_parity_tmp, &keypair) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0); CHECK(pk_parity == pk_parity_tmp); /* Test keypair_seckey */ - ecount = 0; - rustsecp256k1_v0_9_2_testrand256(sk); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_sec(CTX, sk_tmp, &keypair) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_sec(CTX, NULL, &keypair) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_sec(CTX, sk_tmp, NULL) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros96, sk_tmp, sizeof(sk_tmp)) == 0); + rustsecp256k1_v0_10_0_testrand256(sk); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_sec(CTX, sk_tmp, &keypair) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_keypair_sec(CTX, NULL, &keypair)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_keypair_sec(CTX, sk_tmp, NULL)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros96, sk_tmp, sizeof(sk_tmp)) == 0); /* keypair returns the same seckey it got */ - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_sec(CTX, sk_tmp, &keypair) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(sk, sk_tmp, sizeof(sk_tmp)) == 0); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_sec(CTX, sk_tmp, &keypair) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(sk, sk_tmp, sizeof(sk_tmp)) == 0); /* Using an invalid keypair is fine for keypair_seckey */ memset(&keypair, 0, sizeof(keypair)); - CHECK(rustsecp256k1_v0_9_2_keypair_sec(CTX, sk_tmp, &keypair) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros96, sk_tmp, sizeof(sk_tmp)) == 0); - - rustsecp256k1_v0_9_2_context_set_error_callback(STATIC_CTX, NULL, NULL); - rustsecp256k1_v0_9_2_context_set_illegal_callback(STATIC_CTX, NULL, NULL); + CHECK(rustsecp256k1_v0_10_0_keypair_sec(CTX, sk_tmp, &keypair) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros96, sk_tmp, sizeof(sk_tmp)) == 0); } static void test_keypair_add(void) { unsigned char sk[32]; - 
rustsecp256k1_v0_9_2_keypair keypair; + rustsecp256k1_v0_10_0_keypair keypair; unsigned char overflows[32]; unsigned char zeros96[96] = { 0 }; unsigned char tweak[32]; int i; - int ecount = 0; - - set_counting_callbacks(CTX, &ecount); CHECK(sizeof(zeros96) == sizeof(keypair)); - rustsecp256k1_v0_9_2_testrand256(sk); - rustsecp256k1_v0_9_2_testrand256(tweak); + rustsecp256k1_v0_10_0_testrand256(sk); + rustsecp256k1_v0_10_0_testrand256(tweak); memset(overflows, 0xFF, 32); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); - - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(CTX, NULL, tweak) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(CTX, &keypair, NULL) == 0); - CHECK(ecount == 2); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); + + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(CTX, NULL, tweak)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(CTX, &keypair, NULL)); /* This does not set the keypair to zeroes */ - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&keypair, zeros96, sizeof(keypair)) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) != 0); /* Invalid tweak zeroes the keypair */ - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(CTX, &keypair, overflows) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(CTX, &keypair, overflows) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0); /* A zero tweak is fine */ - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(CTX, &keypair, zeros96) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(CTX, &keypair, zeros96) == 1); /* Fails if the resulting keypair was (sk=0, pk=infinity) */ for (i = 0; i < COUNT; i++) { - rustsecp256k1_v0_9_2_scalar scalar_tweak; - rustsecp256k1_v0_9_2_keypair keypair_tmp; - rustsecp256k1_v0_9_2_testrand256(sk); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); + rustsecp256k1_v0_10_0_scalar scalar_tweak; + rustsecp256k1_v0_10_0_keypair keypair_tmp; + rustsecp256k1_v0_10_0_testrand256(sk); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); memcpy(&keypair_tmp, &keypair, sizeof(keypair)); /* Because sk may be negated before adding, we need to try with tweak = * sk as well as tweak = -sk. 
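     * (The x-only secret actually used is sk or n - sk, whichever matches the
     * even-Y public key; for exactly one of the two tweaks the sum of secret
     * and tweak is 0 mod n, which is the invalid pubkey-at-infinity case this
     * loop is after.)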
*/ - rustsecp256k1_v0_9_2_scalar_set_b32(&scalar_tweak, sk, NULL); - rustsecp256k1_v0_9_2_scalar_negate(&scalar_tweak, &scalar_tweak); - rustsecp256k1_v0_9_2_scalar_get_b32(tweak, &scalar_tweak); - CHECK((rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(CTX, &keypair, sk) == 0) - || (rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(CTX, &keypair_tmp, tweak) == 0)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0 - || rustsecp256k1_v0_9_2_memcmp_var(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0); + rustsecp256k1_v0_10_0_scalar_set_b32(&scalar_tweak, sk, NULL); + rustsecp256k1_v0_10_0_scalar_negate(&scalar_tweak, &scalar_tweak); + rustsecp256k1_v0_10_0_scalar_get_b32(tweak, &scalar_tweak); + CHECK((rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(CTX, &keypair, sk) == 0) + || (rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(CTX, &keypair_tmp, tweak) == 0)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0 + || rustsecp256k1_v0_10_0_memcmp_var(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0); } /* Invalid keypair with a valid tweak */ memset(&keypair, 0, sizeof(keypair)); - rustsecp256k1_v0_9_2_testrand256(tweak); - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0); + rustsecp256k1_v0_10_0_testrand256(tweak); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(CTX, &keypair, tweak)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0); /* Only seckey part of keypair invalid */ - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); memset(&keypair, 0, 32); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 0); - CHECK(ecount == 2); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(CTX, &keypair, tweak)); /* Only pubkey part of keypair invalid */ - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); memset(&keypair.data[32], 0, 64); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 0); - CHECK(ecount == 3); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(CTX, &keypair, tweak)); /* Check that the keypair_tweak_add implementation is correct */ - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); for (i = 0; i < COUNT; i++) { - rustsecp256k1_v0_9_2_xonly_pubkey internal_pk; - rustsecp256k1_v0_9_2_xonly_pubkey output_pk; - rustsecp256k1_v0_9_2_pubkey output_pk_xy; - rustsecp256k1_v0_9_2_pubkey output_pk_expected; + rustsecp256k1_v0_10_0_xonly_pubkey internal_pk; + rustsecp256k1_v0_10_0_xonly_pubkey output_pk; + rustsecp256k1_v0_10_0_pubkey output_pk_xy; + rustsecp256k1_v0_10_0_pubkey output_pk_expected; unsigned char pk32[32]; unsigned char sk32[32]; int pk_parity; - rustsecp256k1_v0_9_2_testrand256(tweak); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &internal_pk, NULL, &keypair) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &output_pk, &pk_parity, &keypair) == 1); + rustsecp256k1_v0_10_0_testrand256(tweak); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &internal_pk, NULL, &keypair) == 1); + 
CHECK(rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &output_pk, &pk_parity, &keypair) == 1); /* Check that it passes xonly_pubkey_tweak_add_check */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(CTX, pk32, &output_pk) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(CTX, pk32, pk_parity, &internal_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(CTX, pk32, &output_pk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(CTX, pk32, pk_parity, &internal_pk, tweak) == 1); /* Check that the resulting pubkey matches xonly_pubkey_tweak_add */ - CHECK(rustsecp256k1_v0_9_2_keypair_pub(CTX, &output_pk_xy, &keypair) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add(CTX, &output_pk_expected, &internal_pk, tweak) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); + CHECK(rustsecp256k1_v0_10_0_keypair_pub(CTX, &output_pk_xy, &keypair) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add(CTX, &output_pk_expected, &internal_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); /* Check that the secret key in the keypair is tweaked correctly */ - CHECK(rustsecp256k1_v0_9_2_keypair_sec(CTX, sk32, &keypair) == 1); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &output_pk_expected, sk32) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); + CHECK(rustsecp256k1_v0_10_0_keypair_sec(CTX, sk32, &keypair) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &output_pk_expected, sk32) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); } } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include index 970b28a91..328f1e12b 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include +++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include @@ -1,4 +1,4 @@ -include_HEADERS += include/rustsecp256k1_v0_9_2_recovery.h +include_HEADERS += include/rustsecp256k1_v0_10_0_recovery.h noinst_HEADERS += src/modules/recovery/main_impl.h noinst_HEADERS += src/modules/recovery/tests_impl.h noinst_HEADERS += src/modules/recovery/tests_exhaustive_impl.h diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/bench_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/recovery/bench_impl.h index d982162e7..610e938f6 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/bench_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/bench_impl.h @@ -10,7 +10,7 @@ #include "../../../include/secp256k1_recovery.h" typedef struct { - rustsecp256k1_v0_9_2_context *ctx; + rustsecp256k1_v0_10_0_context *ctx; unsigned char msg[32]; unsigned char sig[64]; } bench_recover_data; @@ -18,16 +18,16 @@ typedef struct { static void bench_recover(void* arg, int iters) { int i; bench_recover_data *data = (bench_recover_data*)arg; - rustsecp256k1_v0_9_2_pubkey pubkey; + rustsecp256k1_v0_10_0_pubkey pubkey; unsigned char pubkeyc[33]; for (i = 0; i < iters; i++) { int j; size_t pubkeylen = 33; - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature sig; - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(data->ctx, &sig, data->sig, i % 2)); - 
CHECK(rustsecp256k1_v0_9_2_ecdsa_recover(data->ctx, &pubkey, &sig, data->msg));
-        CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED));
+        rustsecp256k1_v0_10_0_ecdsa_recoverable_signature sig;
+        CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(data->ctx, &sig, data->sig, i % 2));
+        CHECK(rustsecp256k1_v0_10_0_ecdsa_recover(data->ctx, &pubkey, &sig, data->msg));
+        CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED));
         for (j = 0; j < 32; j++) {
             data->sig[j + 32] = data->msg[j];    /* Move former message to S. */
             data->msg[j] = data->sig[j];         /* Move former R to message. */
@@ -52,11 +52,11 @@ static void run_recovery_bench(int iters, int argc, char** argv) {
     bench_recover_data data;
     int d = argc == 1;
 
-    data.ctx = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE);
+    data.ctx = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE);
 
     if (d || have_flag(argc, argv, "ecdsa") || have_flag(argc, argv, "recover") || have_flag(argc, argv, "ecdsa_recover")) run_benchmark("ecdsa_recover", bench_recover, bench_recover_setup, NULL, &data, 10, iters);
 
-    rustsecp256k1_v0_9_2_context_destroy(data.ctx);
+    rustsecp256k1_v0_10_0_context_destroy(data.ctx);
 }
 
 #endif /* SECP256K1_MODULE_RECOVERY_BENCH_H */
diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h
index f8aee8ca2..9d7a0d486 100644
--- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h
+++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h
@@ -9,34 +9,34 @@
 
 #include "../../../include/secp256k1_recovery.h"
 
-static void rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_load(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_scalar* r, rustsecp256k1_v0_9_2_scalar* s, int* recid, const rustsecp256k1_v0_9_2_ecdsa_recoverable_signature* sig) {
+static void rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_load(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_scalar* r, rustsecp256k1_v0_10_0_scalar* s, int* recid, const rustsecp256k1_v0_10_0_ecdsa_recoverable_signature* sig) {
     (void)ctx;
-    if (sizeof(rustsecp256k1_v0_9_2_scalar) == 32) {
-        /* When the rustsecp256k1_v0_9_2_scalar type is exactly 32 byte, use its
-         * representation inside rustsecp256k1_v0_9_2_ecdsa_signature, as conversion is very fast.
-         * Note that rustsecp256k1_v0_9_2_ecdsa_signature_save must use the same representation. */
+    if (sizeof(rustsecp256k1_v0_10_0_scalar) == 32) {
+        /* When the rustsecp256k1_v0_10_0_scalar type is exactly 32 bytes, use its
+         * representation inside rustsecp256k1_v0_10_0_ecdsa_signature, as conversion is very fast.
+         * Note that rustsecp256k1_v0_10_0_ecdsa_signature_save must use the same representation.
*/ memcpy(r, &sig->data[0], 32); memcpy(s, &sig->data[32], 32); } else { - rustsecp256k1_v0_9_2_scalar_set_b32(r, &sig->data[0], NULL); - rustsecp256k1_v0_9_2_scalar_set_b32(s, &sig->data[32], NULL); + rustsecp256k1_v0_10_0_scalar_set_b32(r, &sig->data[0], NULL); + rustsecp256k1_v0_10_0_scalar_set_b32(s, &sig->data[32], NULL); } *recid = sig->data[64]; } -static void rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_save(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature* sig, const rustsecp256k1_v0_9_2_scalar* r, const rustsecp256k1_v0_9_2_scalar* s, int recid) { - if (sizeof(rustsecp256k1_v0_9_2_scalar) == 32) { +static void rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_save(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature* sig, const rustsecp256k1_v0_10_0_scalar* r, const rustsecp256k1_v0_10_0_scalar* s, int recid) { + if (sizeof(rustsecp256k1_v0_10_0_scalar) == 32) { memcpy(&sig->data[0], r, 32); memcpy(&sig->data[32], s, 32); } else { - rustsecp256k1_v0_9_2_scalar_get_b32(&sig->data[0], r); - rustsecp256k1_v0_9_2_scalar_get_b32(&sig->data[32], s); + rustsecp256k1_v0_10_0_scalar_get_b32(&sig->data[0], r); + rustsecp256k1_v0_10_0_scalar_get_b32(&sig->data[32], s); } sig->data[64] = recid; } -int rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) { - rustsecp256k1_v0_9_2_scalar r, s; +int rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) { + rustsecp256k1_v0_10_0_scalar r, s; int ret = 1; int overflow = 0; @@ -45,110 +45,110 @@ int rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(const rustsec ARG_CHECK(input64 != NULL); ARG_CHECK(recid >= 0 && recid <= 3); - rustsecp256k1_v0_9_2_scalar_set_b32(&r, &input64[0], &overflow); + rustsecp256k1_v0_10_0_scalar_set_b32(&r, &input64[0], &overflow); ret &= !overflow; - rustsecp256k1_v0_9_2_scalar_set_b32(&s, &input64[32], &overflow); + rustsecp256k1_v0_10_0_scalar_set_b32(&s, &input64[32], &overflow); ret &= !overflow; if (ret) { - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_save(sig, &r, &s, recid); + rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_save(sig, &r, &s, recid); } else { memset(sig, 0, sizeof(*sig)); } return ret; } -int rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_serialize_compact(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *output64, int *recid, const rustsecp256k1_v0_9_2_ecdsa_recoverable_signature* sig) { - rustsecp256k1_v0_9_2_scalar r, s; +int rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_serialize_compact(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *output64, int *recid, const rustsecp256k1_v0_10_0_ecdsa_recoverable_signature* sig) { + rustsecp256k1_v0_10_0_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output64 != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(recid != NULL); - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig); - rustsecp256k1_v0_9_2_scalar_get_b32(&output64[0], &r); - rustsecp256k1_v0_9_2_scalar_get_b32(&output64[32], &s); + rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig); + rustsecp256k1_v0_10_0_scalar_get_b32(&output64[0], &r); + rustsecp256k1_v0_10_0_scalar_get_b32(&output64[32], &s); return 1; } -int rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_convert(const rustsecp256k1_v0_9_2_context* ctx, 
rustsecp256k1_v0_9_2_ecdsa_signature* sig, const rustsecp256k1_v0_9_2_ecdsa_recoverable_signature* sigin) { - rustsecp256k1_v0_9_2_scalar r, s; +int rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_convert(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_ecdsa_signature* sig, const rustsecp256k1_v0_10_0_ecdsa_recoverable_signature* sigin) { + rustsecp256k1_v0_10_0_scalar r, s; int recid; VERIFY_CHECK(ctx != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(sigin != NULL); - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin); - rustsecp256k1_v0_9_2_ecdsa_signature_save(sig, &r, &s); + rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin); + rustsecp256k1_v0_10_0_ecdsa_signature_save(sig, &r, &s); return 1; } -static int rustsecp256k1_v0_9_2_ecdsa_sig_recover(const rustsecp256k1_v0_9_2_scalar *sigr, const rustsecp256k1_v0_9_2_scalar* sigs, rustsecp256k1_v0_9_2_ge *pubkey, const rustsecp256k1_v0_9_2_scalar *message, int recid) { +static int rustsecp256k1_v0_10_0_ecdsa_sig_recover(const rustsecp256k1_v0_10_0_scalar *sigr, const rustsecp256k1_v0_10_0_scalar* sigs, rustsecp256k1_v0_10_0_ge *pubkey, const rustsecp256k1_v0_10_0_scalar *message, int recid) { unsigned char brx[32]; - rustsecp256k1_v0_9_2_fe fx; - rustsecp256k1_v0_9_2_ge x; - rustsecp256k1_v0_9_2_gej xj; - rustsecp256k1_v0_9_2_scalar rn, u1, u2; - rustsecp256k1_v0_9_2_gej qj; + rustsecp256k1_v0_10_0_fe fx; + rustsecp256k1_v0_10_0_ge x; + rustsecp256k1_v0_10_0_gej xj; + rustsecp256k1_v0_10_0_scalar rn, u1, u2; + rustsecp256k1_v0_10_0_gej qj; int r; - if (rustsecp256k1_v0_9_2_scalar_is_zero(sigr) || rustsecp256k1_v0_9_2_scalar_is_zero(sigs)) { + if (rustsecp256k1_v0_10_0_scalar_is_zero(sigr) || rustsecp256k1_v0_10_0_scalar_is_zero(sigs)) { return 0; } - rustsecp256k1_v0_9_2_scalar_get_b32(brx, sigr); - r = rustsecp256k1_v0_9_2_fe_set_b32_limit(&fx, brx); + rustsecp256k1_v0_10_0_scalar_get_b32(brx, sigr); + r = rustsecp256k1_v0_10_0_fe_set_b32_limit(&fx, brx); (void)r; VERIFY_CHECK(r); /* brx comes from a scalar, so is less than the order; certainly less than p */ if (recid & 2) { - if (rustsecp256k1_v0_9_2_fe_cmp_var(&fx, &rustsecp256k1_v0_9_2_ecdsa_const_p_minus_order) >= 0) { + if (rustsecp256k1_v0_10_0_fe_cmp_var(&fx, &rustsecp256k1_v0_10_0_ecdsa_const_p_minus_order) >= 0) { return 0; } - rustsecp256k1_v0_9_2_fe_add(&fx, &rustsecp256k1_v0_9_2_ecdsa_const_order_as_fe); + rustsecp256k1_v0_10_0_fe_add(&fx, &rustsecp256k1_v0_10_0_ecdsa_const_order_as_fe); } - if (!rustsecp256k1_v0_9_2_ge_set_xo_var(&x, &fx, recid & 1)) { + if (!rustsecp256k1_v0_10_0_ge_set_xo_var(&x, &fx, recid & 1)) { return 0; } - rustsecp256k1_v0_9_2_gej_set_ge(&xj, &x); - rustsecp256k1_v0_9_2_scalar_inverse_var(&rn, sigr); - rustsecp256k1_v0_9_2_scalar_mul(&u1, &rn, message); - rustsecp256k1_v0_9_2_scalar_negate(&u1, &u1); - rustsecp256k1_v0_9_2_scalar_mul(&u2, &rn, sigs); - rustsecp256k1_v0_9_2_ecmult(&qj, &xj, &u2, &u1); - rustsecp256k1_v0_9_2_ge_set_gej_var(pubkey, &qj); - return !rustsecp256k1_v0_9_2_gej_is_infinity(&qj); + rustsecp256k1_v0_10_0_gej_set_ge(&xj, &x); + rustsecp256k1_v0_10_0_scalar_inverse_var(&rn, sigr); + rustsecp256k1_v0_10_0_scalar_mul(&u1, &rn, message); + rustsecp256k1_v0_10_0_scalar_negate(&u1, &u1); + rustsecp256k1_v0_10_0_scalar_mul(&u2, &rn, sigs); + rustsecp256k1_v0_10_0_ecmult(&qj, &xj, &u2, &u1); + rustsecp256k1_v0_10_0_ge_set_gej_var(pubkey, &qj); + return !rustsecp256k1_v0_10_0_gej_is_infinity(&qj); } -int rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(const 
rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_ecdsa_recoverable_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_9_2_nonce_function noncefp, const void* noncedata) { - rustsecp256k1_v0_9_2_scalar r, s; +int rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_ecdsa_recoverable_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_10_0_nonce_function noncefp, const void* noncedata) { + rustsecp256k1_v0_10_0_scalar r, s; int ret, recid; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_9_2_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_10_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(msghash32 != NULL); ARG_CHECK(signature != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_9_2_ecdsa_sign_inner(ctx, &r, &s, &recid, msghash32, seckey, noncefp, noncedata); - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_save(signature, &r, &s, recid); + ret = rustsecp256k1_v0_10_0_ecdsa_sign_inner(ctx, &r, &s, &recid, msghash32, seckey, noncefp, noncedata); + rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_save(signature, &r, &s, recid); return ret; } -int rustsecp256k1_v0_9_2_ecdsa_recover(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_pubkey *pubkey, const rustsecp256k1_v0_9_2_ecdsa_recoverable_signature *signature, const unsigned char *msghash32) { - rustsecp256k1_v0_9_2_ge q; - rustsecp256k1_v0_9_2_scalar r, s; - rustsecp256k1_v0_9_2_scalar m; +int rustsecp256k1_v0_10_0_ecdsa_recover(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_pubkey *pubkey, const rustsecp256k1_v0_10_0_ecdsa_recoverable_signature *signature, const unsigned char *msghash32) { + rustsecp256k1_v0_10_0_ge q; + rustsecp256k1_v0_10_0_scalar r, s; + rustsecp256k1_v0_10_0_scalar m; int recid; VERIFY_CHECK(ctx != NULL); ARG_CHECK(msghash32 != NULL); ARG_CHECK(signature != NULL); ARG_CHECK(pubkey != NULL); - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature); + rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature); VERIFY_CHECK(recid >= 0 && recid < 4); /* should have been caught in parse_compact */ - rustsecp256k1_v0_9_2_scalar_set_b32(&m, msghash32, NULL); - if (rustsecp256k1_v0_9_2_ecdsa_sig_recover(&r, &s, &q, &m, recid)) { - rustsecp256k1_v0_9_2_pubkey_save(pubkey, &q); + rustsecp256k1_v0_10_0_scalar_set_b32(&m, msghash32, NULL); + if (rustsecp256k1_v0_10_0_ecdsa_sig_recover(&r, &s, &q, &m, recid)) { + rustsecp256k1_v0_10_0_pubkey_save(pubkey, &q); return 1; } else { memset(pubkey, 0, sizeof(*pubkey)); diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h index 099b1b408..716153b8f 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h @@ -10,7 +10,7 @@ #include "main_impl.h" #include "../../../include/secp256k1_recovery.h" -static void test_exhaustive_recovery_sign(const rustsecp256k1_v0_9_2_context *ctx, const rustsecp256k1_v0_9_2_ge *group) { +static void test_exhaustive_recovery_sign(const rustsecp256k1_v0_10_0_context *ctx, const rustsecp256k1_v0_10_0_ge *group) { int i, j, k; uint64_t iter = 0; @@ -20,23 +20,23 @@ static void test_exhaustive_recovery_sign(const 
rustsecp256k1_v0_9_2_context *ct
             if (skip_section(&iter)) continue;
             for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) {  /* nonce */
                 const int starting_k = k;
-                rustsecp256k1_v0_9_2_fe r_dot_y_normalized;
-                rustsecp256k1_v0_9_2_ecdsa_recoverable_signature rsig;
-                rustsecp256k1_v0_9_2_ecdsa_signature sig;
-                rustsecp256k1_v0_9_2_scalar sk, msg, r, s, expected_r;
+                rustsecp256k1_v0_10_0_fe r_dot_y_normalized;
+                rustsecp256k1_v0_10_0_ecdsa_recoverable_signature rsig;
+                rustsecp256k1_v0_10_0_ecdsa_signature sig;
+                rustsecp256k1_v0_10_0_scalar sk, msg, r, s, expected_r;
                 unsigned char sk32[32], msg32[32];
                 int expected_recid;
                 int recid;
                 int overflow;
-                rustsecp256k1_v0_9_2_scalar_set_int(&msg, i);
-                rustsecp256k1_v0_9_2_scalar_set_int(&sk, j);
-                rustsecp256k1_v0_9_2_scalar_get_b32(sk32, &sk);
-                rustsecp256k1_v0_9_2_scalar_get_b32(msg32, &msg);
+                rustsecp256k1_v0_10_0_scalar_set_int(&msg, i);
+                rustsecp256k1_v0_10_0_scalar_set_int(&sk, j);
+                rustsecp256k1_v0_10_0_scalar_get_b32(sk32, &sk);
+                rustsecp256k1_v0_10_0_scalar_get_b32(msg32, &msg);
 
-                rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, rustsecp256k1_v0_9_2_nonce_function_smallint, &k);
+                rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, rustsecp256k1_v0_10_0_nonce_function_smallint, &k);
 
                 /* Check directly */
-                rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
+                rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
                 r_from_k(&expected_r, group, k, &overflow);
                 CHECK(r == expected_r);
                 CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
@@ -49,18 +49,18 @@ static void test_exhaustive_recovery_sign(const rustsecp256k1_v0_9_2_context *ct
                  * in the real group. */
                 expected_recid = overflow ? 2 : 0;
                 r_dot_y_normalized = group[k].y;
-                rustsecp256k1_v0_9_2_fe_normalize(&r_dot_y_normalized);
+                rustsecp256k1_v0_10_0_fe_normalize(&r_dot_y_normalized);
                 /* Also the recovery id is flipped depending on whether we hit the low-s branch */
                 if ((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER) {
-                    expected_recid |= rustsecp256k1_v0_9_2_fe_is_odd(&r_dot_y_normalized);
+                    expected_recid |= rustsecp256k1_v0_10_0_fe_is_odd(&r_dot_y_normalized);
                 } else {
-                    expected_recid |= !rustsecp256k1_v0_9_2_fe_is_odd(&r_dot_y_normalized);
+                    expected_recid |= !rustsecp256k1_v0_10_0_fe_is_odd(&r_dot_y_normalized);
                 }
                 CHECK(recid == expected_recid);
                 /* Convert to a standard sig then check */
-                rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
-                rustsecp256k1_v0_9_2_ecdsa_signature_load(ctx, &r, &s, &sig);
+                rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
+                rustsecp256k1_v0_10_0_ecdsa_signature_load(ctx, &r, &s, &sig);
 
                 /* Note that we compute expected_r *after* signing -- this is important
                  * because our nonce-computing function might change k during
                  * signing.
*/ @@ -78,7 +78,7 @@ static void test_exhaustive_recovery_sign(const rustsecp256k1_v0_9_2_context *ct } } -static void test_exhaustive_recovery_verify(const rustsecp256k1_v0_9_2_context *ctx, const rustsecp256k1_v0_9_2_ge *group) { +static void test_exhaustive_recovery_verify(const rustsecp256k1_v0_10_0_context *ctx, const rustsecp256k1_v0_10_0_ge *group) { /* This is essentially a copy of test_exhaustive_verify, with recovery added */ int s, r, msg, key; uint64_t iter = 0; @@ -86,41 +86,41 @@ static void test_exhaustive_recovery_verify(const rustsecp256k1_v0_9_2_context * for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) { for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) { for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) { - rustsecp256k1_v0_9_2_ge nonconst_ge; - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature rsig; - rustsecp256k1_v0_9_2_ecdsa_signature sig; - rustsecp256k1_v0_9_2_pubkey pk; - rustsecp256k1_v0_9_2_scalar sk_s, msg_s, r_s, s_s; - rustsecp256k1_v0_9_2_scalar s_times_k_s, msg_plus_r_times_sk_s; + rustsecp256k1_v0_10_0_ge nonconst_ge; + rustsecp256k1_v0_10_0_ecdsa_recoverable_signature rsig; + rustsecp256k1_v0_10_0_ecdsa_signature sig; + rustsecp256k1_v0_10_0_pubkey pk; + rustsecp256k1_v0_10_0_scalar sk_s, msg_s, r_s, s_s; + rustsecp256k1_v0_10_0_scalar s_times_k_s, msg_plus_r_times_sk_s; int recid = 0; int k, should_verify; unsigned char msg32[32]; if (skip_section(&iter)) continue; - rustsecp256k1_v0_9_2_scalar_set_int(&s_s, s); - rustsecp256k1_v0_9_2_scalar_set_int(&r_s, r); - rustsecp256k1_v0_9_2_scalar_set_int(&msg_s, msg); - rustsecp256k1_v0_9_2_scalar_set_int(&sk_s, key); - rustsecp256k1_v0_9_2_scalar_get_b32(msg32, &msg_s); + rustsecp256k1_v0_10_0_scalar_set_int(&s_s, s); + rustsecp256k1_v0_10_0_scalar_set_int(&r_s, r); + rustsecp256k1_v0_10_0_scalar_set_int(&msg_s, msg); + rustsecp256k1_v0_10_0_scalar_set_int(&sk_s, key); + rustsecp256k1_v0_10_0_scalar_get_b32(msg32, &msg_s); /* Verify by hand */ /* Run through every k value that gives us this r and check that *one* works. * Note there could be none, there could be multiple, ECDSA is weird. */ should_verify = 0; for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) { - rustsecp256k1_v0_9_2_scalar check_x_s; + rustsecp256k1_v0_10_0_scalar check_x_s; r_from_k(&check_x_s, group, k, NULL); if (r_s == check_x_s) { - rustsecp256k1_v0_9_2_scalar_set_int(&s_times_k_s, k); - rustsecp256k1_v0_9_2_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); - rustsecp256k1_v0_9_2_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); - rustsecp256k1_v0_9_2_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); - should_verify |= rustsecp256k1_v0_9_2_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); + rustsecp256k1_v0_10_0_scalar_set_int(&s_times_k_s, k); + rustsecp256k1_v0_10_0_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); + rustsecp256k1_v0_10_0_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); + rustsecp256k1_v0_10_0_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); + should_verify |= rustsecp256k1_v0_10_0_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); } } /* nb we have a "high s" rule */ - should_verify &= !rustsecp256k1_v0_9_2_scalar_is_high(&s_s); + should_verify &= !rustsecp256k1_v0_10_0_scalar_is_high(&s_s); /* We would like to try recovering the pubkey and checking that it matches, * but pubkey recovery is impossible in the exhaustive tests (the reason @@ -128,19 +128,19 @@ static void test_exhaustive_recovery_verify(const rustsecp256k1_v0_9_2_context * * overlap between the sets, so there are no valid signatures). 
*/ /* Verify by converting to a standard signature and calling verify */ - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid); - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); + rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid); + rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge)); - rustsecp256k1_v0_9_2_pubkey_save(&pk, &nonconst_ge); + rustsecp256k1_v0_10_0_pubkey_save(&pk, &nonconst_ge); CHECK(should_verify == - rustsecp256k1_v0_9_2_ecdsa_verify(ctx, &sig, msg32, &pk)); + rustsecp256k1_v0_10_0_ecdsa_verify(ctx, &sig, msg32, &pk)); } } } } } -static void test_exhaustive_recovery(const rustsecp256k1_v0_9_2_context *ctx, const rustsecp256k1_v0_9_2_ge *group) { +static void test_exhaustive_recovery(const rustsecp256k1_v0_10_0_context *ctx, const rustsecp256k1_v0_10_0_ge *group) { test_exhaustive_recovery_sign(ctx, group); test_exhaustive_recovery_verify(ctx, group); } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h index c058f0ebe..9143f9ba7 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h @@ -25,18 +25,17 @@ static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned c } /* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. */ memset(nonce32, 1, 32); - return rustsecp256k1_v0_9_2_testrand_bits(1); + return rustsecp256k1_v0_10_0_testrand_bits(1); } static void test_ecdsa_recovery_api(void) { /* Setup contexts that just count errors */ - rustsecp256k1_v0_9_2_pubkey pubkey; - rustsecp256k1_v0_9_2_pubkey recpubkey; - rustsecp256k1_v0_9_2_ecdsa_signature normal_sig; - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature recsig; + rustsecp256k1_v0_10_0_pubkey pubkey; + rustsecp256k1_v0_10_0_pubkey recpubkey; + rustsecp256k1_v0_10_0_ecdsa_signature normal_sig; + rustsecp256k1_v0_10_0_ecdsa_recoverable_signature recsig; unsigned char privkey[32] = { 1 }; unsigned char message[32] = { 2 }; - int32_t ecount = 0; int recid = 0; unsigned char sig[74]; unsigned char zero_privkey[32] = { 0 }; @@ -45,144 +44,110 @@ static void test_ecdsa_recovery_api(void) { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - rustsecp256k1_v0_9_2_context_set_error_callback(CTX, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_9_2_context_set_error_callback(STATIC_CTX, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_9_2_context_set_illegal_callback(STATIC_CTX, counting_illegal_callback_fn, &ecount); - /* Construct and verify corresponding public key. 
 */
-    CHECK(rustsecp256k1_v0_9_2_ec_seckey_verify(CTX, privkey) == 1);
-    CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey, privkey) == 1);
+    CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, privkey) == 1);
+    CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey, privkey) == 1);
 
     /* Check bad contexts and NULLs for signing */
-    ecount = 0;
-    CHECK(rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(CTX, &recsig, message, privkey, NULL, NULL) == 1);
-    CHECK(ecount == 0);
-    CHECK(rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(CTX, NULL, message, privkey, NULL, NULL) == 0);
-    CHECK(ecount == 1);
-    CHECK(rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(CTX, &recsig, NULL, privkey, NULL, NULL) == 0);
-    CHECK(ecount == 2);
-    CHECK(rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(CTX, &recsig, message, NULL, NULL, NULL) == 0);
-    CHECK(ecount == 3);
-    CHECK(rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(STATIC_CTX, &recsig, message, privkey, NULL, NULL) == 0);
-    CHECK(ecount == 4);
+    CHECK(rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(CTX, &recsig, message, privkey, NULL, NULL) == 1);
+    CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(CTX, NULL, message, privkey, NULL, NULL));
+    CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(CTX, &recsig, NULL, privkey, NULL, NULL));
+    CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(CTX, &recsig, message, NULL, NULL, NULL));
+    CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(STATIC_CTX, &recsig, message, privkey, NULL, NULL));
     /* This will fail or succeed randomly, and in either case will not trigger an ARG_CHECK failure */
-    rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(CTX, &recsig, message, privkey, recovery_test_nonce_function, NULL);
-    CHECK(ecount == 4);
+    rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(CTX, &recsig, message, privkey, recovery_test_nonce_function, NULL);
     /* These will all fail, but not in an ARG_CHECK way */
-    CHECK(rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(CTX, &recsig, message, zero_privkey, NULL, NULL) == 0);
-    CHECK(rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(CTX, &recsig, message, over_privkey, NULL, NULL) == 0);
+    CHECK(rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(CTX, &recsig, message, zero_privkey, NULL, NULL) == 0);
+    CHECK(rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(CTX, &recsig, message, over_privkey, NULL, NULL) == 0);
     /* This one will succeed.
*/ - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(CTX, &recsig, message, privkey, NULL, NULL) == 1); - CHECK(ecount == 4); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(CTX, &recsig, message, privkey, NULL, NULL) == 1); /* Check signing with a goofy nonce function */ /* Check bad contexts and NULLs for recovery */ - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_ecdsa_recover(CTX, &recpubkey, &recsig, message) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recover(CTX, NULL, &recsig, message) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recover(CTX, &recpubkey, NULL, message) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recover(CTX, &recpubkey, &recsig, NULL) == 0); - CHECK(ecount == 3); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recover(CTX, &recpubkey, &recsig, message) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_recover(CTX, NULL, &recsig, message)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_recover(CTX, &recpubkey, NULL, message)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_recover(CTX, &recpubkey, &recsig, NULL)); /* Check NULLs for conversion */ - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &normal_sig, message, privkey, NULL, NULL) == 1); - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_convert(CTX, NULL, &recsig) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_convert(CTX, &normal_sig, NULL) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_convert(CTX, &normal_sig, &recsig) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &normal_sig, message, privkey, NULL, NULL) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_convert(CTX, NULL, &recsig)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_convert(CTX, &normal_sig, NULL)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_convert(CTX, &normal_sig, &recsig) == 1); /* Check NULLs for de/serialization */ - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(CTX, &recsig, message, privkey, NULL, NULL) == 1); - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_serialize_compact(CTX, NULL, &recid, &recsig) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_serialize_compact(CTX, sig, NULL, &recsig) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_serialize_compact(CTX, sig, &recid, NULL) == 0); - CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_serialize_compact(CTX, sig, &recid, &recsig) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(CTX, &recsig, message, privkey, NULL, NULL) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_serialize_compact(CTX, NULL, &recid, &recsig)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_serialize_compact(CTX, sig, NULL, &recsig)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_serialize_compact(CTX, sig, &recid, NULL)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_serialize_compact(CTX, sig, &recid, &recsig) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, NULL, sig, recid) == 0); - CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, NULL, recid) == 0); - CHECK(ecount == 5); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, sig, -1) == 
0); - CHECK(ecount == 6); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, sig, 5) == 0); - CHECK(ecount == 7); - /* overflow in signature will fail but not affect ecount */ + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, NULL, sig, recid)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, NULL, recid)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, sig, -1)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, sig, 5)); + /* overflow in signature will not result in calling illegal_callback */ memcpy(sig, over_privkey, 32); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, sig, recid) == 0); - CHECK(ecount == 7); - - /* cleanup */ - rustsecp256k1_v0_9_2_context_set_error_callback(STATIC_CTX, NULL, NULL); - rustsecp256k1_v0_9_2_context_set_illegal_callback(STATIC_CTX, NULL, NULL); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, sig, recid) == 0); } static void test_ecdsa_recovery_end_to_end(void) { unsigned char extra[32] = {0x00}; unsigned char privkey[32]; unsigned char message[32]; - rustsecp256k1_v0_9_2_ecdsa_signature signature[5]; - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature rsignature[5]; + rustsecp256k1_v0_10_0_ecdsa_signature signature[5]; + rustsecp256k1_v0_10_0_ecdsa_recoverable_signature rsignature[5]; unsigned char sig[74]; - rustsecp256k1_v0_9_2_pubkey pubkey; - rustsecp256k1_v0_9_2_pubkey recpubkey; + rustsecp256k1_v0_10_0_pubkey pubkey; + rustsecp256k1_v0_10_0_pubkey recpubkey; int recid = 0; /* Generate a random key and message. */ { - rustsecp256k1_v0_9_2_scalar msg, key; + rustsecp256k1_v0_10_0_scalar msg, key; random_scalar_order_test(&msg); random_scalar_order_test(&key); - rustsecp256k1_v0_9_2_scalar_get_b32(privkey, &key); - rustsecp256k1_v0_9_2_scalar_get_b32(message, &msg); + rustsecp256k1_v0_10_0_scalar_get_b32(privkey, &key); + rustsecp256k1_v0_10_0_scalar_get_b32(message, &msg); } /* Construct and verify corresponding public key. */ - CHECK(rustsecp256k1_v0_9_2_ec_seckey_verify(CTX, privkey) == 1); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey, privkey) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, privkey) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey, privkey) == 1); /* Serialize/parse compact and verify/recover. 
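     * The recid obtained from serialize_compact tells recover which candidate
     * public key to reconstruct: bit 0 is the parity of R's y coordinate and
     * bit 1 marks the rare case that R's x coordinate exceeded the group
     * order.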
*/ extra[0] = 0; - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(CTX, &rsignature[0], message, privkey, NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &signature[0], message, privkey, NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(CTX, &rsignature[4], message, privkey, NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(CTX, &rsignature[1], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(CTX, &rsignature[0], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &signature[0], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(CTX, &rsignature[4], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(CTX, &rsignature[1], message, privkey, NULL, extra) == 1); extra[31] = 1; - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(CTX, &rsignature[2], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(CTX, &rsignature[2], message, privkey, NULL, extra) == 1); extra[31] = 0; extra[0] = 1; - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign_recoverable(CTX, &rsignature[3], message, privkey, NULL, extra) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_serialize_compact(CTX, sig, &recid, &rsignature[4]) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_convert(CTX, &signature[4], &rsignature[4]) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&signature[4], &signature[0], 64) == 0); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &signature[4], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(CTX, &rsignature[3], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_serialize_compact(CTX, sig, &recid, &rsignature[4]) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_convert(CTX, &signature[4], &rsignature[4]) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&signature[4], &signature[0], 64) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &signature[4], message, &pubkey) == 1); memset(&rsignature[4], 0, sizeof(rsignature[4])); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &rsignature[4], sig, recid) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_convert(CTX, &signature[4], &rsignature[4]) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &signature[4], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &rsignature[4], sig, recid) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_convert(CTX, &signature[4], &rsignature[4]) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &signature[4], message, &pubkey) == 1); /* Parse compact (with recovery id) and recover. */ - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &rsignature[4], sig, recid) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recover(CTX, &recpubkey, &rsignature[4], message) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &rsignature[4], sig, recid) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recover(CTX, &recpubkey, &rsignature[4], message) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) == 0); /* Serialize/destroy/parse signature and verify again. 
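For reference, a hedged sketch (not part of the patch) of the public-API round trip that test_ecdsa_recovery_end_to_end drives above, written against the renamed vendored symbols; error handling is reduced to early returns, and the key and message buffers are assumed valid:

    #include <string.h>

    static int recovery_roundtrip(const rustsecp256k1_v0_10_0_context *ctx,
                                  const unsigned char msg32[32],
                                  const unsigned char seckey32[32]) {
        rustsecp256k1_v0_10_0_ecdsa_recoverable_signature rsig;
        rustsecp256k1_v0_10_0_pubkey pk, recovered;
        unsigned char compact64[64];
        int recid;

        if (!rustsecp256k1_v0_10_0_ec_pubkey_create(ctx, &pk, seckey32)) return 0;
        if (!rustsecp256k1_v0_10_0_ecdsa_sign_recoverable(ctx, &rsig, msg32, seckey32, NULL, NULL)) return 0;
        /* Compact form: 64 bytes (r||s) plus a recovery id in [0, 3]. */
        if (!rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_serialize_compact(ctx, compact64, &recid, &rsig)) return 0;
        if (!rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, compact64, recid)) return 0;
        if (!rustsecp256k1_v0_10_0_ecdsa_recover(ctx, &recovered, &rsig, msg32)) return 0;
        /* The tests compare the pubkey structs directly (memcmp_var); plain
         * memcmp is the portable equivalent here. */
        return memcmp(&pk, &recovered, sizeof(pk)) == 0;
    }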
*/ - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_serialize_compact(CTX, sig, &recid, &rsignature[4]) == 1); - sig[rustsecp256k1_v0_9_2_testrand_bits(6)] += 1 + rustsecp256k1_v0_9_2_testrand_int(255); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &rsignature[4], sig, recid) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_convert(CTX, &signature[4], &rsignature[4]) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &signature[4], message, &pubkey) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_serialize_compact(CTX, sig, &recid, &rsignature[4]) == 1); + sig[rustsecp256k1_v0_10_0_testrand_bits(6)] += 1 + rustsecp256k1_v0_10_0_testrand_int(255); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &rsignature[4], sig, recid) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_convert(CTX, &signature[4], &rsignature[4]) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &signature[4], message, &pubkey) == 0); /* Recover again */ - CHECK(rustsecp256k1_v0_9_2_ecdsa_recover(CTX, &recpubkey, &rsignature[4], message) == 0 || - rustsecp256k1_v0_9_2_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) != 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recover(CTX, &recpubkey, &rsignature[4], message) == 0 || + rustsecp256k1_v0_10_0_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) != 0); } /* Tests several edge cases. */ @@ -205,7 +170,7 @@ static void test_ecdsa_recovery_edge_cases(void) { 0x7D, 0xD7, 0x3E, 0x38, 0x7E, 0xE4, 0xFC, 0x86, 0x6E, 0x1B, 0xE8, 0xEC, 0xC7, 0xDD, 0x95, 0x57 }; - rustsecp256k1_v0_9_2_pubkey pubkey; + rustsecp256k1_v0_10_0_pubkey pubkey; /* signature (r,s) = (4,4), which can be recovered with all 4 recids. */ const unsigned char sigb64[64] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -217,19 +182,19 @@ static void test_ecdsa_recovery_edge_cases(void) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, }; - rustsecp256k1_v0_9_2_pubkey pubkeyb; - rustsecp256k1_v0_9_2_ecdsa_recoverable_signature rsig; - rustsecp256k1_v0_9_2_ecdsa_signature sig; + rustsecp256k1_v0_10_0_pubkey pubkeyb; + rustsecp256k1_v0_10_0_ecdsa_recoverable_signature rsig; + rustsecp256k1_v0_10_0_ecdsa_signature sig; int recid; - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sig64, 0)); - CHECK(!rustsecp256k1_v0_9_2_ecdsa_recover(CTX, &pubkey, &rsig, msg32)); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sig64, 1)); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recover(CTX, &pubkey, &rsig, msg32)); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sig64, 2)); - CHECK(!rustsecp256k1_v0_9_2_ecdsa_recover(CTX, &pubkey, &rsig, msg32)); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sig64, 3)); - CHECK(!rustsecp256k1_v0_9_2_ecdsa_recover(CTX, &pubkey, &rsig, msg32)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sig64, 0)); + CHECK(!rustsecp256k1_v0_10_0_ecdsa_recover(CTX, &pubkey, &rsig, msg32)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sig64, 1)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recover(CTX, &pubkey, &rsig, msg32)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sig64, 2)); + CHECK(!rustsecp256k1_v0_10_0_ecdsa_recover(CTX, &pubkey, &rsig, msg32)); + 
CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sig64, 3)); + CHECK(!rustsecp256k1_v0_10_0_ecdsa_recover(CTX, &pubkey, &rsig, msg32)); for (recid = 0; recid < 4; recid++) { int i; @@ -274,40 +239,40 @@ static void test_ecdsa_recovery_edge_cases(void) { 0xE6, 0xAF, 0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E, 0x8C, 0xD0, 0x36, 0x41, 0x45, 0x02, 0x01, 0x04 }; - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sigb64, recid) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recover(CTX, &pubkeyb, &rsig, msg32) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigbder, sizeof(sigbder)) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &sig, msg32, &pubkeyb) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sigb64, recid) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recover(CTX, &pubkeyb, &rsig, msg32) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigbder, sizeof(sigbder)) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &sig, msg32, &pubkeyb) == 1); for (recid2 = 0; recid2 < 4; recid2++) { - rustsecp256k1_v0_9_2_pubkey pubkey2b; - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sigb64, recid2) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recover(CTX, &pubkey2b, &rsig, msg32) == 1); + rustsecp256k1_v0_10_0_pubkey pubkey2b; + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sigb64, recid2) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recover(CTX, &pubkey2b, &rsig, msg32) == 1); /* Verifying with (order + r,4) should always fail. */ - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigbderlong, sizeof(sigbderlong)) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigbderlong, sizeof(sigbderlong)) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &sig, msg32, &pubkeyb) == 0); } /* DER parsing tests. */ /* Zero length r/s. */ - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0); /* Leading zeros. 
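The recid loops above exercise all four possible recovery ids: parsing accepts any id in [0, 3], while recovery itself fails when no curve point matches the implied y-parity/overflow combination. A hedged standalone sketch (illustrative, not from the patch) of the same brute-force selection:

    static int find_recid(const rustsecp256k1_v0_10_0_context *ctx,
                          const unsigned char sig64[64],
                          const unsigned char msg32[32],
                          const rustsecp256k1_v0_10_0_pubkey *expected) {
        int recid;
        for (recid = 0; recid < 4; recid++) {
            rustsecp256k1_v0_10_0_ecdsa_recoverable_signature rsig;
            rustsecp256k1_v0_10_0_pubkey pk;
            if (rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, recid)
                && rustsecp256k1_v0_10_0_ecdsa_recover(ctx, &pk, &rsig, msg32)
                && memcmp(&pk, expected, sizeof(pk)) == 0) {
                return recid; /* first id that reproduces the expected key */
            }
        }
        return -1; /* no recovery id yields the expected key */
    }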
*/ - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0); sigbderalt3[4] = 1; - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &sig, msg32, &pubkeyb) == 0); sigbderalt4[7] = 1; - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &sig, msg32, &pubkeyb) == 0); /* Damage signature. */ sigbder[7]++; - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigbder, sizeof(sigbder)) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigbder, sizeof(sigbder)) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &sig, msg32, &pubkeyb) == 0); sigbder[7]--; - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigbder, 6) == 0); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigbder, sizeof(sigbder) - 1) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigbder, 6) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigbder, sizeof(sigbder) - 1) == 0); for(i = 0; i < 8; i++) { int c; unsigned char orig = sigbder[i]; @@ -317,7 +282,7 @@ static void test_ecdsa_recovery_edge_cases(void) { continue; } sigbder[i] = c; - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigbder, sizeof(sigbder)) == 0 || rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigbder, sizeof(sigbder)) == 0 || rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &sig, msg32, &pubkeyb) == 0); } sigbder[i] = orig; } @@ -337,25 +302,25 @@ static void test_ecdsa_recovery_edge_cases(void) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, }; - rustsecp256k1_v0_9_2_pubkey pubkeyc; - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sigc64, 0) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recover(CTX, &pubkeyc, &rsig, msg32) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigcder, sizeof(sigcder)) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &sig, msg32, &pubkeyc) == 1); + rustsecp256k1_v0_10_0_pubkey 
pubkeyc; + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sigc64, 0) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recover(CTX, &pubkeyc, &rsig, msg32) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigcder, sizeof(sigcder)) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &sig, msg32, &pubkeyc) == 1); sigcder[4] = 0; sigc64[31] = 0; - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sigc64, 0) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recover(CTX, &pubkeyb, &rsig, msg32) == 0); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigcder, sizeof(sigcder)) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &sig, msg32, &pubkeyc) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sigc64, 0) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recover(CTX, &pubkeyb, &rsig, msg32) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigcder, sizeof(sigcder)) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &sig, msg32, &pubkeyc) == 0); sigcder[4] = 1; sigcder[7] = 0; sigc64[31] = 1; sigc64[63] = 0; - CHECK(rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sigc64, 0) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_recover(CTX, &pubkeyb, &rsig, msg32) == 0); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, sigcder, sizeof(sigcder)) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &sig, msg32, &pubkeyc) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact(CTX, &rsig, sigc64, 0) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_recover(CTX, &pubkeyb, &rsig, msg32) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, sigcder, sizeof(sigcder)) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &sig, msg32, &pubkeyc) == 0); } } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/Makefile.am.include index faf5b5fbe..77d9bcffa 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/Makefile.am.include +++ b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/Makefile.am.include @@ -1,4 +1,4 @@ -include_HEADERS += include/rustsecp256k1_v0_9_2_schnorrsig.h +include_HEADERS += include/rustsecp256k1_v0_10_0_schnorrsig.h noinst_HEADERS += src/modules/schnorrsig/main_impl.h noinst_HEADERS += src/modules/schnorrsig/tests_impl.h noinst_HEADERS += src/modules/schnorrsig/tests_exhaustive_impl.h diff --git a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/bench_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/bench_impl.h index 36fcf4c98..1140bb167 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/bench_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/bench_impl.h @@ -12,10 +12,10 @@ #define MSGLEN 32 typedef struct { - rustsecp256k1_v0_9_2_context *ctx; + rustsecp256k1_v0_10_0_context *ctx; int n; - const rustsecp256k1_v0_9_2_keypair **keypairs; + const rustsecp256k1_v0_10_0_keypair **keypairs; const unsigned char **pk; const unsigned char **sigs; const unsigned char **msgs; @@ -30,7 +30,7 @@ static void bench_schnorrsig_sign(void* arg, int iters) { for (i = 0; i < iters; i++) { msg[0] = i; msg[1] = i >> 8; - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(data->ctx, sig, msg, MSGLEN, data->keypairs[i], NULL)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign_custom(data->ctx, sig, msg, MSGLEN, 
data->keypairs[i], NULL)); } } @@ -39,9 +39,9 @@ static void bench_schnorrsig_verify(void* arg, int iters) { int i; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_9_2_xonly_pubkey pk; - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(data->ctx, &pk, data->pk[i]) == 1); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], MSGLEN, &pk)); + rustsecp256k1_v0_10_0_xonly_pubkey pk; + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_parse(data->ctx, &pk, data->pk[i]) == 1); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], MSGLEN, &pk)); } } @@ -50,8 +50,8 @@ static void run_schnorrsig_bench(int iters, int argc, char** argv) { bench_schnorrsig_data data; int d = argc == 1; - data.ctx = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE); - data.keypairs = (const rustsecp256k1_v0_9_2_keypair **)malloc(iters * sizeof(rustsecp256k1_v0_9_2_keypair *)); + data.ctx = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE); + data.keypairs = (const rustsecp256k1_v0_10_0_keypair **)malloc(iters * sizeof(rustsecp256k1_v0_10_0_keypair *)); data.pk = (const unsigned char **)malloc(iters * sizeof(unsigned char *)); data.msgs = (const unsigned char **)malloc(iters * sizeof(unsigned char *)); data.sigs = (const unsigned char **)malloc(iters * sizeof(unsigned char *)); @@ -61,9 +61,9 @@ static void run_schnorrsig_bench(int iters, int argc, char** argv) { unsigned char sk[32]; unsigned char *msg = (unsigned char *)malloc(MSGLEN); unsigned char *sig = (unsigned char *)malloc(64); - rustsecp256k1_v0_9_2_keypair *keypair = (rustsecp256k1_v0_9_2_keypair *)malloc(sizeof(*keypair)); + rustsecp256k1_v0_10_0_keypair *keypair = (rustsecp256k1_v0_10_0_keypair *)malloc(sizeof(*keypair)); unsigned char *pk_char = (unsigned char *)malloc(32); - rustsecp256k1_v0_9_2_xonly_pubkey pk; + rustsecp256k1_v0_10_0_xonly_pubkey pk; msg[0] = sk[0] = i; msg[1] = sk[1] = i >> 8; msg[2] = sk[2] = i >> 16; @@ -76,10 +76,10 @@ static void run_schnorrsig_bench(int iters, int argc, char** argv) { data.msgs[i] = msg; data.sigs[i] = sig; - CHECK(rustsecp256k1_v0_9_2_keypair_create(data.ctx, keypair, sk)); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(data.ctx, sig, msg, MSGLEN, keypair, NULL)); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(data.ctx, &pk, NULL, keypair)); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(data.ctx, pk_char, &pk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_create(data.ctx, keypair, sk)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign_custom(data.ctx, sig, msg, MSGLEN, keypair, NULL)); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(data.ctx, &pk, NULL, keypair)); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(data.ctx, pk_char, &pk) == 1); } if (d || have_flag(argc, argv, "schnorrsig") || have_flag(argc, argv, "sign") || have_flag(argc, argv, "schnorrsig_sign")) run_benchmark("schnorrsig_sign", bench_schnorrsig_sign, NULL, NULL, (void *) &data, 10, iters); @@ -98,7 +98,7 @@ static void run_schnorrsig_bench(int iters, int argc, char** argv) { free((void *)data.msgs); free((void *)data.sigs); - rustsecp256k1_v0_9_2_context_destroy(data.ctx); + rustsecp256k1_v0_10_0_context_destroy(data.ctx); } #endif diff --git a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h index 82efa4df5..9df11af32 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h @@ 
-13,8 +13,8 @@ /* Initializes SHA256 with fixed midstate. This midstate was computed by applying * SHA256 to SHA256("BIP0340/nonce")||SHA256("BIP0340/nonce"). */ -static void rustsecp256k1_v0_9_2_nonce_function_bip340_sha256_tagged(rustsecp256k1_v0_9_2_sha256 *sha) { - rustsecp256k1_v0_9_2_sha256_initialize(sha); +static void rustsecp256k1_v0_10_0_nonce_function_bip340_sha256_tagged(rustsecp256k1_v0_10_0_sha256 *sha) { + rustsecp256k1_v0_10_0_sha256_initialize(sha); sha->s[0] = 0x46615b35ul; sha->s[1] = 0xf4bfbff7ul; sha->s[2] = 0x9f8dc671ul; @@ -29,8 +29,8 @@ static void rustsecp256k1_v0_9_2_nonce_function_bip340_sha256_tagged(rustsecp256 /* Initializes SHA256 with fixed midstate. This midstate was computed by applying * SHA256 to SHA256("BIP0340/aux")||SHA256("BIP0340/aux"). */ -static void rustsecp256k1_v0_9_2_nonce_function_bip340_sha256_tagged_aux(rustsecp256k1_v0_9_2_sha256 *sha) { - rustsecp256k1_v0_9_2_sha256_initialize(sha); +static void rustsecp256k1_v0_10_0_nonce_function_bip340_sha256_tagged_aux(rustsecp256k1_v0_10_0_sha256 *sha) { + rustsecp256k1_v0_10_0_sha256_initialize(sha); sha->s[0] = 0x24dd3219ul; sha->s[1] = 0x4eba7e70ul; sha->s[2] = 0xca0fabb9ul; @@ -50,7 +50,7 @@ static const unsigned char bip340_algo[13] = "BIP0340/nonce"; static const unsigned char schnorrsig_extraparams_magic[4] = SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC; static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void *data) { - rustsecp256k1_v0_9_2_sha256 sha; + rustsecp256k1_v0_10_0_sha256 sha; unsigned char masked_key[32]; int i; @@ -59,9 +59,9 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms } if (data != NULL) { - rustsecp256k1_v0_9_2_nonce_function_bip340_sha256_tagged_aux(&sha); - rustsecp256k1_v0_9_2_sha256_write(&sha, data, 32); - rustsecp256k1_v0_9_2_sha256_finalize(&sha, masked_key); + rustsecp256k1_v0_10_0_nonce_function_bip340_sha256_tagged_aux(&sha); + rustsecp256k1_v0_10_0_sha256_write(&sha, data, 32); + rustsecp256k1_v0_10_0_sha256_finalize(&sha, masked_key); for (i = 0; i < 32; i++) { masked_key[i] ^= key32[i]; } @@ -82,26 +82,26 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms * algorithms. If this nonce function is used in BIP-340 signing as defined * in the spec, an optimized tagging implementation is used. 
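Both initializers above cache a SHA256 midstate. For BIP-340, tagged_hash(tag, data) = SHA256(SHA256(tag) || SHA256(tag) || data), and because SHA256(tag) || SHA256(tag) fills exactly one 64-byte compression block, the state after absorbing it (the hard-coded `sha->s[..]` words, with `sha->bytes = 64`) can be precomputed. A hedged sketch of the generic form using the module-internal hash helpers that appear in this hunk; this is what sha256_initialize_tagged computes on the fly, and the constants above merely skip the first two hashes:

    static void tagged_hash(unsigned char out32[32],
                            const unsigned char *tag, size_t taglen,
                            const unsigned char *data, size_t datalen) {
        unsigned char taghash[32];
        rustsecp256k1_v0_10_0_sha256 sha;

        /* taghash = SHA256(tag) */
        rustsecp256k1_v0_10_0_sha256_initialize(&sha);
        rustsecp256k1_v0_10_0_sha256_write(&sha, tag, taglen);
        rustsecp256k1_v0_10_0_sha256_finalize(&sha, taghash);

        /* out32 = SHA256(taghash || taghash || data) */
        rustsecp256k1_v0_10_0_sha256_initialize(&sha);
        rustsecp256k1_v0_10_0_sha256_write(&sha, taghash, 32);
        rustsecp256k1_v0_10_0_sha256_write(&sha, taghash, 32);
        rustsecp256k1_v0_10_0_sha256_write(&sha, data, datalen);
        rustsecp256k1_v0_10_0_sha256_finalize(&sha, out32);
    }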
*/ if (algolen == sizeof(bip340_algo) - && rustsecp256k1_v0_9_2_memcmp_var(algo, bip340_algo, algolen) == 0) { - rustsecp256k1_v0_9_2_nonce_function_bip340_sha256_tagged(&sha); + && rustsecp256k1_v0_10_0_memcmp_var(algo, bip340_algo, algolen) == 0) { + rustsecp256k1_v0_10_0_nonce_function_bip340_sha256_tagged(&sha); } else { - rustsecp256k1_v0_9_2_sha256_initialize_tagged(&sha, algo, algolen); + rustsecp256k1_v0_10_0_sha256_initialize_tagged(&sha, algo, algolen); } /* Hash masked-key||pk||msg using the tagged hash as per the spec */ - rustsecp256k1_v0_9_2_sha256_write(&sha, masked_key, 32); - rustsecp256k1_v0_9_2_sha256_write(&sha, xonly_pk32, 32); - rustsecp256k1_v0_9_2_sha256_write(&sha, msg, msglen); - rustsecp256k1_v0_9_2_sha256_finalize(&sha, nonce32); + rustsecp256k1_v0_10_0_sha256_write(&sha, masked_key, 32); + rustsecp256k1_v0_10_0_sha256_write(&sha, xonly_pk32, 32); + rustsecp256k1_v0_10_0_sha256_write(&sha, msg, msglen); + rustsecp256k1_v0_10_0_sha256_finalize(&sha, nonce32); return 1; } -const rustsecp256k1_v0_9_2_nonce_function_hardened rustsecp256k1_v0_9_2_nonce_function_bip340 = nonce_function_bip340; +const rustsecp256k1_v0_10_0_nonce_function_hardened rustsecp256k1_v0_10_0_nonce_function_bip340 = nonce_function_bip340; /* Initializes SHA256 with fixed midstate. This midstate was computed by applying * SHA256 to SHA256("BIP0340/challenge")||SHA256("BIP0340/challenge"). */ -static void rustsecp256k1_v0_9_2_schnorrsig_sha256_tagged(rustsecp256k1_v0_9_2_sha256 *sha) { - rustsecp256k1_v0_9_2_sha256_initialize(sha); +static void rustsecp256k1_v0_10_0_schnorrsig_sha256_tagged(rustsecp256k1_v0_10_0_sha256 *sha) { + rustsecp256k1_v0_10_0_sha256_initialize(sha); sha->s[0] = 0x9cecba11ul; sha->s[1] = 0x23925381ul; sha->s[2] = 0x11679112ul; @@ -113,117 +113,117 @@ static void rustsecp256k1_v0_9_2_schnorrsig_sha256_tagged(rustsecp256k1_v0_9_2_s sha->bytes = 64; } -static void rustsecp256k1_v0_9_2_schnorrsig_challenge(rustsecp256k1_v0_9_2_scalar* e, const unsigned char *r32, const unsigned char *msg, size_t msglen, const unsigned char *pubkey32) +static void rustsecp256k1_v0_10_0_schnorrsig_challenge(rustsecp256k1_v0_10_0_scalar* e, const unsigned char *r32, const unsigned char *msg, size_t msglen, const unsigned char *pubkey32) { unsigned char buf[32]; - rustsecp256k1_v0_9_2_sha256 sha; + rustsecp256k1_v0_10_0_sha256 sha; /* tagged hash(r.x, pk.x, msg) */ - rustsecp256k1_v0_9_2_schnorrsig_sha256_tagged(&sha); - rustsecp256k1_v0_9_2_sha256_write(&sha, r32, 32); - rustsecp256k1_v0_9_2_sha256_write(&sha, pubkey32, 32); - rustsecp256k1_v0_9_2_sha256_write(&sha, msg, msglen); - rustsecp256k1_v0_9_2_sha256_finalize(&sha, buf); + rustsecp256k1_v0_10_0_schnorrsig_sha256_tagged(&sha); + rustsecp256k1_v0_10_0_sha256_write(&sha, r32, 32); + rustsecp256k1_v0_10_0_sha256_write(&sha, pubkey32, 32); + rustsecp256k1_v0_10_0_sha256_write(&sha, msg, msglen); + rustsecp256k1_v0_10_0_sha256_finalize(&sha, buf); /* Set scalar e to the challenge hash modulo the curve order as per * BIP340. 
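To summarize the derivation that nonce_function_bip340 implements above (matching BIP-340):

    /* t     = key32 XOR SHA256_tag("BIP0340/aux", aux_rand32)
     * nonce = SHA256_tag("BIP0340/nonce", t || pk.x || msg)
     *
     * where SHA256_tag(tag, x) = SHA256(SHA256(tag) || SHA256(tag) || x).
     * Passing aux_rand32 as NULL (via `data`) behaves like 32 zero bytes,
     * which the nonce-function tests later in this patch assert explicitly. */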
*/ - rustsecp256k1_v0_9_2_scalar_set_b32(e, buf, NULL); + rustsecp256k1_v0_10_0_scalar_set_b32(e, buf, NULL); } -static int rustsecp256k1_v0_9_2_schnorrsig_sign_internal(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1_v0_9_2_keypair *keypair, rustsecp256k1_v0_9_2_nonce_function_hardened noncefp, void *ndata) { - rustsecp256k1_v0_9_2_scalar sk; - rustsecp256k1_v0_9_2_scalar e; - rustsecp256k1_v0_9_2_scalar k; - rustsecp256k1_v0_9_2_gej rj; - rustsecp256k1_v0_9_2_ge pk; - rustsecp256k1_v0_9_2_ge r; +static int rustsecp256k1_v0_10_0_schnorrsig_sign_internal(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1_v0_10_0_keypair *keypair, rustsecp256k1_v0_10_0_nonce_function_hardened noncefp, void *ndata) { + rustsecp256k1_v0_10_0_scalar sk; + rustsecp256k1_v0_10_0_scalar e; + rustsecp256k1_v0_10_0_scalar k; + rustsecp256k1_v0_10_0_gej rj; + rustsecp256k1_v0_10_0_ge pk; + rustsecp256k1_v0_10_0_ge r; unsigned char buf[32] = { 0 }; unsigned char pk_buf[32]; unsigned char seckey[32]; int ret = 1; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_9_2_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_10_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(sig64 != NULL); ARG_CHECK(msg != NULL || msglen == 0); ARG_CHECK(keypair != NULL); if (noncefp == NULL) { - noncefp = rustsecp256k1_v0_9_2_nonce_function_bip340; + noncefp = rustsecp256k1_v0_10_0_nonce_function_bip340; } - ret &= rustsecp256k1_v0_9_2_keypair_load(ctx, &sk, &pk, keypair); + ret &= rustsecp256k1_v0_10_0_keypair_load(ctx, &sk, &pk, keypair); /* Because we are signing for a x-only pubkey, the secret key is negated * before signing if the point corresponding to the secret key does not * have an even Y. */ - if (rustsecp256k1_v0_9_2_fe_is_odd(&pk.y)) { - rustsecp256k1_v0_9_2_scalar_negate(&sk, &sk); + if (rustsecp256k1_v0_10_0_fe_is_odd(&pk.y)) { + rustsecp256k1_v0_10_0_scalar_negate(&sk, &sk); } - rustsecp256k1_v0_9_2_scalar_get_b32(seckey, &sk); - rustsecp256k1_v0_9_2_fe_get_b32(pk_buf, &pk.x); + rustsecp256k1_v0_10_0_scalar_get_b32(seckey, &sk); + rustsecp256k1_v0_10_0_fe_get_b32(pk_buf, &pk.x); ret &= !!noncefp(buf, msg, msglen, seckey, pk_buf, bip340_algo, sizeof(bip340_algo), ndata); - rustsecp256k1_v0_9_2_scalar_set_b32(&k, buf, NULL); - ret &= !rustsecp256k1_v0_9_2_scalar_is_zero(&k); - rustsecp256k1_v0_9_2_scalar_cmov(&k, &rustsecp256k1_v0_9_2_scalar_one, !ret); + rustsecp256k1_v0_10_0_scalar_set_b32(&k, buf, NULL); + ret &= !rustsecp256k1_v0_10_0_scalar_is_zero(&k); + rustsecp256k1_v0_10_0_scalar_cmov(&k, &rustsecp256k1_v0_10_0_scalar_one, !ret); - rustsecp256k1_v0_9_2_ecmult_gen(&ctx->ecmult_gen_ctx, &rj, &k); - rustsecp256k1_v0_9_2_ge_set_gej(&r, &rj); + rustsecp256k1_v0_10_0_ecmult_gen(&ctx->ecmult_gen_ctx, &rj, &k); + rustsecp256k1_v0_10_0_ge_set_gej(&r, &rj); /* We declassify r to allow using it as a branch point. This is fine * because r is not a secret. 
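A note on the declassify call just below: the ctime_tests harness checks constant-time behavior by tracking memory definedness, and R = k*G is derived from the secret nonce. However, R.x is published as the first half of the signature, and the branch on R's y-parity only decides the nonce negation that BIP-340 makes observable anyway, so marking r as non-secret before the fe_is_odd and fe_normalize_var branches cannot leak secret information.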
*/ - rustsecp256k1_v0_9_2_declassify(ctx, &r, sizeof(r)); - rustsecp256k1_v0_9_2_fe_normalize_var(&r.y); - if (rustsecp256k1_v0_9_2_fe_is_odd(&r.y)) { - rustsecp256k1_v0_9_2_scalar_negate(&k, &k); + rustsecp256k1_v0_10_0_declassify(ctx, &r, sizeof(r)); + rustsecp256k1_v0_10_0_fe_normalize_var(&r.y); + if (rustsecp256k1_v0_10_0_fe_is_odd(&r.y)) { + rustsecp256k1_v0_10_0_scalar_negate(&k, &k); } - rustsecp256k1_v0_9_2_fe_normalize_var(&r.x); - rustsecp256k1_v0_9_2_fe_get_b32(&sig64[0], &r.x); + rustsecp256k1_v0_10_0_fe_normalize_var(&r.x); + rustsecp256k1_v0_10_0_fe_get_b32(&sig64[0], &r.x); - rustsecp256k1_v0_9_2_schnorrsig_challenge(&e, &sig64[0], msg, msglen, pk_buf); - rustsecp256k1_v0_9_2_scalar_mul(&e, &e, &sk); - rustsecp256k1_v0_9_2_scalar_add(&e, &e, &k); - rustsecp256k1_v0_9_2_scalar_get_b32(&sig64[32], &e); + rustsecp256k1_v0_10_0_schnorrsig_challenge(&e, &sig64[0], msg, msglen, pk_buf); + rustsecp256k1_v0_10_0_scalar_mul(&e, &e, &sk); + rustsecp256k1_v0_10_0_scalar_add(&e, &e, &k); + rustsecp256k1_v0_10_0_scalar_get_b32(&sig64[32], &e); - rustsecp256k1_v0_9_2_memczero(sig64, 64, !ret); - rustsecp256k1_v0_9_2_scalar_clear(&k); - rustsecp256k1_v0_9_2_scalar_clear(&sk); + rustsecp256k1_v0_10_0_memczero(sig64, 64, !ret); + rustsecp256k1_v0_10_0_scalar_clear(&k); + rustsecp256k1_v0_10_0_scalar_clear(&sk); memset(seckey, 0, sizeof(seckey)); return ret; } -int rustsecp256k1_v0_9_2_schnorrsig_sign32(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_9_2_keypair *keypair, const unsigned char *aux_rand32) { +int rustsecp256k1_v0_10_0_schnorrsig_sign32(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_10_0_keypair *keypair, const unsigned char *aux_rand32) { /* We cast away const from the passed aux_rand32 argument since we know the default nonce function does not modify it. 
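Spelled out, the algebra that rustsecp256k1_v0_10_0_schnorrsig_sign_internal implements above is the BIP-340 scheme:

    /* d' = d  if P = d*G has even y, else n - d   (seckey negation above)
     * k' = k  if R = k*G has even y, else n - k   (nonce negation above)
     * e  = SHA256_tag("BIP0340/challenge", R.x || P.x || msg) mod n
     * s  = (k' + e * d') mod n
     * sig64 = R.x (32 bytes) || s (32 bytes)
     *
     * Verification (schnorrsig_verify further below) recomputes
     * R' = s*G + (-e)*P and accepts iff R' is not infinity, has even y,
     * and R'.x equals the first 32 bytes of the signature. */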
*/ - return rustsecp256k1_v0_9_2_schnorrsig_sign_internal(ctx, sig64, msg32, 32, keypair, rustsecp256k1_v0_9_2_nonce_function_bip340, (unsigned char*)aux_rand32); + return rustsecp256k1_v0_10_0_schnorrsig_sign_internal(ctx, sig64, msg32, 32, keypair, rustsecp256k1_v0_10_0_nonce_function_bip340, (unsigned char*)aux_rand32); } -int rustsecp256k1_v0_9_2_schnorrsig_sign(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_9_2_keypair *keypair, const unsigned char *aux_rand32) { - return rustsecp256k1_v0_9_2_schnorrsig_sign32(ctx, sig64, msg32, keypair, aux_rand32); +int rustsecp256k1_v0_10_0_schnorrsig_sign(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_10_0_keypair *keypair, const unsigned char *aux_rand32) { + return rustsecp256k1_v0_10_0_schnorrsig_sign32(ctx, sig64, msg32, keypair, aux_rand32); } -int rustsecp256k1_v0_9_2_schnorrsig_sign_custom(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1_v0_9_2_keypair *keypair, rustsecp256k1_v0_9_2_schnorrsig_extraparams *extraparams) { - rustsecp256k1_v0_9_2_nonce_function_hardened noncefp = NULL; +int rustsecp256k1_v0_10_0_schnorrsig_sign_custom(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1_v0_10_0_keypair *keypair, rustsecp256k1_v0_10_0_schnorrsig_extraparams *extraparams) { + rustsecp256k1_v0_10_0_nonce_function_hardened noncefp = NULL; void *ndata = NULL; VERIFY_CHECK(ctx != NULL); if (extraparams != NULL) { - ARG_CHECK(rustsecp256k1_v0_9_2_memcmp_var(extraparams->magic, + ARG_CHECK(rustsecp256k1_v0_10_0_memcmp_var(extraparams->magic, schnorrsig_extraparams_magic, sizeof(extraparams->magic)) == 0); noncefp = extraparams->noncefp; ndata = extraparams->ndata; } - return rustsecp256k1_v0_9_2_schnorrsig_sign_internal(ctx, sig64, msg, msglen, keypair, noncefp, ndata); + return rustsecp256k1_v0_10_0_schnorrsig_sign_internal(ctx, sig64, msg, msglen, keypair, noncefp, ndata); } -int rustsecp256k1_v0_9_2_schnorrsig_verify(const rustsecp256k1_v0_9_2_context* ctx, const unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1_v0_9_2_xonly_pubkey *pubkey) { - rustsecp256k1_v0_9_2_scalar s; - rustsecp256k1_v0_9_2_scalar e; - rustsecp256k1_v0_9_2_gej rj; - rustsecp256k1_v0_9_2_ge pk; - rustsecp256k1_v0_9_2_gej pkj; - rustsecp256k1_v0_9_2_fe rx; - rustsecp256k1_v0_9_2_ge r; +int rustsecp256k1_v0_10_0_schnorrsig_verify(const rustsecp256k1_v0_10_0_context* ctx, const unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1_v0_10_0_xonly_pubkey *pubkey) { + rustsecp256k1_v0_10_0_scalar s; + rustsecp256k1_v0_10_0_scalar e; + rustsecp256k1_v0_10_0_gej rj; + rustsecp256k1_v0_10_0_ge pk; + rustsecp256k1_v0_10_0_gej pkj; + rustsecp256k1_v0_10_0_fe rx; + rustsecp256k1_v0_10_0_ge r; unsigned char buf[32]; int overflow; @@ -232,36 +232,36 @@ int rustsecp256k1_v0_9_2_schnorrsig_verify(const rustsecp256k1_v0_9_2_context* c ARG_CHECK(msg != NULL || msglen == 0); ARG_CHECK(pubkey != NULL); - if (!rustsecp256k1_v0_9_2_fe_set_b32_limit(&rx, &sig64[0])) { + if (!rustsecp256k1_v0_10_0_fe_set_b32_limit(&rx, &sig64[0])) { return 0; } - rustsecp256k1_v0_9_2_scalar_set_b32(&s, &sig64[32], &overflow); + rustsecp256k1_v0_10_0_scalar_set_b32(&s, &sig64[32], &overflow); if (overflow) { return 0; } - if (!rustsecp256k1_v0_9_2_xonly_pubkey_load(ctx, &pk, 
pubkey)) { + if (!rustsecp256k1_v0_10_0_xonly_pubkey_load(ctx, &pk, pubkey)) { return 0; } /* Compute e. */ - rustsecp256k1_v0_9_2_fe_get_b32(buf, &pk.x); - rustsecp256k1_v0_9_2_schnorrsig_challenge(&e, &sig64[0], msg, msglen, buf); + rustsecp256k1_v0_10_0_fe_get_b32(buf, &pk.x); + rustsecp256k1_v0_10_0_schnorrsig_challenge(&e, &sig64[0], msg, msglen, buf); /* Compute rj = s*G + (-e)*pkj */ - rustsecp256k1_v0_9_2_scalar_negate(&e, &e); - rustsecp256k1_v0_9_2_gej_set_ge(&pkj, &pk); - rustsecp256k1_v0_9_2_ecmult(&rj, &pkj, &e, &s); + rustsecp256k1_v0_10_0_scalar_negate(&e, &e); + rustsecp256k1_v0_10_0_gej_set_ge(&pkj, &pk); + rustsecp256k1_v0_10_0_ecmult(&rj, &pkj, &e, &s); - rustsecp256k1_v0_9_2_ge_set_gej_var(&r, &rj); - if (rustsecp256k1_v0_9_2_ge_is_infinity(&r)) { + rustsecp256k1_v0_10_0_ge_set_gej_var(&r, &rj); + if (rustsecp256k1_v0_10_0_ge_is_infinity(&r)) { return 0; } - rustsecp256k1_v0_9_2_fe_normalize_var(&r.y); - return !rustsecp256k1_v0_9_2_fe_is_odd(&r.y) && - rustsecp256k1_v0_9_2_fe_equal(&rx, &r.x); + rustsecp256k1_v0_10_0_fe_normalize_var(&r.y); + return !rustsecp256k1_v0_10_0_fe_is_odd(&r.y) && + rustsecp256k1_v0_10_0_fe_equal(&rx, &r.x); } #endif diff --git a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h index f765f6a7b..6906786e9 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h @@ -58,12 +58,12 @@ static const unsigned char invalid_pubkey_bytes[][32] = { #define NUM_INVALID_KEYS (sizeof(invalid_pubkey_bytes) / sizeof(invalid_pubkey_bytes[0])) -static int rustsecp256k1_v0_9_2_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg, +static int rustsecp256k1_v0_10_0_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void* data) { - rustsecp256k1_v0_9_2_scalar s; + rustsecp256k1_v0_10_0_scalar s; int *idata = data; (void)msg; (void)msglen; @@ -71,12 +71,12 @@ static int rustsecp256k1_v0_9_2_hardened_nonce_function_smallint(unsigned char * (void)xonly_pk32; (void)algo; (void)algolen; - rustsecp256k1_v0_9_2_scalar_set_int(&s, *idata); - rustsecp256k1_v0_9_2_scalar_get_b32(nonce32, &s); + rustsecp256k1_v0_10_0_scalar_set_int(&s, *idata); + rustsecp256k1_v0_10_0_scalar_get_b32(nonce32, &s); return 1; } -static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_9_2_context *ctx, const rustsecp256k1_v0_9_2_xonly_pubkey* pubkeys, unsigned char (*xonly_pubkey_bytes)[32], const int* parities) { +static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_10_0_context *ctx, const rustsecp256k1_v0_10_0_xonly_pubkey* pubkeys, unsigned char (*xonly_pubkey_bytes)[32], const int* parities) { int d; uint64_t iter = 0; /* Iterate over the possible public keys to verify against (through their corresponding DL d). */ @@ -102,10 +102,10 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_9_2_context } /* Randomly generate messages until all challenges have been hit. 
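Because the exhaustive tests run over a group of tiny order EXHAUSTIVE_TEST_ORDER, the challenge e can only take EXHAUSTIVE_TEST_ORDER distinct values. Rather than enumerating messages, the loop below samples random 32-byte messages and tracks which challenge residues have already appeared (e_done and e_count_done), terminating once every residue has been exercised.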
*/ while (e_count_done < EXHAUSTIVE_TEST_ORDER) { - rustsecp256k1_v0_9_2_scalar e; + rustsecp256k1_v0_10_0_scalar e; unsigned char msg32[32]; - rustsecp256k1_v0_9_2_testrand256(msg32); - rustsecp256k1_v0_9_2_schnorrsig_challenge(&e, sig64, msg32, sizeof(msg32), pk32); + rustsecp256k1_v0_10_0_testrand256(msg32); + rustsecp256k1_v0_10_0_schnorrsig_challenge(&e, sig64, msg32, sizeof(msg32), pk32); /* Only do work if we hit a challenge we haven't tried before. */ if (!e_done[e]) { /* Iterate over the possible valid last 32 bytes in the signature. @@ -116,14 +116,14 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_9_2_context int expect_valid, valid; if (s <= EXHAUSTIVE_TEST_ORDER) { memset(sig64 + 32, 0, 32); - rustsecp256k1_v0_9_2_write_be32(sig64 + 60, s); + rustsecp256k1_v0_10_0_write_be32(sig64 + 60, s); expect_valid = actual_k != -1 && s != EXHAUSTIVE_TEST_ORDER && (s == (actual_k + actual_d * e) % EXHAUSTIVE_TEST_ORDER); } else { - rustsecp256k1_v0_9_2_testrand256(sig64 + 32); + rustsecp256k1_v0_10_0_testrand256(sig64 + 32); expect_valid = 0; } - valid = rustsecp256k1_v0_9_2_schnorrsig_verify(ctx, sig64, msg32, sizeof(msg32), &pubkeys[d - 1]); + valid = rustsecp256k1_v0_10_0_schnorrsig_verify(ctx, sig64, msg32, sizeof(msg32), &pubkeys[d - 1]); CHECK(valid == expect_valid); count_valid += valid; } @@ -138,10 +138,10 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_9_2_context } } -static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_9_2_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const rustsecp256k1_v0_9_2_keypair* keypairs, const int* parities) { +static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_10_0_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const rustsecp256k1_v0_10_0_keypair* keypairs, const int* parities) { int d, k; uint64_t iter = 0; - rustsecp256k1_v0_9_2_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; + rustsecp256k1_v0_10_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; /* Loop over keys. */ for (d = 1; d < EXHAUSTIVE_TEST_ORDER; ++d) { @@ -155,25 +155,25 @@ static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_9_2_context * unsigned char sig64[64]; int actual_k = k; if (skip_section(&iter)) continue; - extraparams.noncefp = rustsecp256k1_v0_9_2_hardened_nonce_function_smallint; + extraparams.noncefp = rustsecp256k1_v0_10_0_hardened_nonce_function_smallint; extraparams.ndata = &k; if (parities[k - 1]) actual_k = EXHAUSTIVE_TEST_ORDER - k; /* Generate random messages until all challenges have been tried. */ while (e_count_done < EXHAUSTIVE_TEST_ORDER) { - rustsecp256k1_v0_9_2_scalar e; - rustsecp256k1_v0_9_2_testrand256(msg32); - rustsecp256k1_v0_9_2_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, sizeof(msg32), xonly_pubkey_bytes[d - 1]); + rustsecp256k1_v0_10_0_scalar e; + rustsecp256k1_v0_10_0_testrand256(msg32); + rustsecp256k1_v0_10_0_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, sizeof(msg32), xonly_pubkey_bytes[d - 1]); /* Only do work if we hit a challenge we haven't tried before. 
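A hedged sketch (mirroring the test logic above, illustrative only) of the validity predicate the exhaustive verify loop checks for each candidate scalar s; EXHAUSTIVE_TEST_ORDER is the test build's group-order macro and stdint.h is assumed:

    /* Valid iff the signature's first half matched some nonce k (actual_k is
     * -1 otherwise), s is a canonical scalar (s != EXHAUSTIVE_TEST_ORDER), and
     * s satisfies the Schnorr equation over the tiny group. */
    static int schnorr_expect_valid(uint32_t s, int actual_k, int actual_d, uint32_t e) {
        return actual_k != -1
            && s != EXHAUSTIVE_TEST_ORDER
            && s == (uint32_t)((actual_k + actual_d * e) % EXHAUSTIVE_TEST_ORDER);
    }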
*/ if (!e_done[e]) { - rustsecp256k1_v0_9_2_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER; + rustsecp256k1_v0_10_0_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER; unsigned char expected_s_bytes[32]; - rustsecp256k1_v0_9_2_scalar_get_b32(expected_s_bytes, &expected_s); + rustsecp256k1_v0_10_0_scalar_get_b32(expected_s_bytes, &expected_s); /* Invoke the real function to construct a signature. */ - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(ctx, sig64, msg32, sizeof(msg32), &keypairs[d - 1], &extraparams)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign_custom(ctx, sig64, msg32, sizeof(msg32), &keypairs[d - 1], &extraparams)); /* The first 32 bytes must match the xonly pubkey for the specified k. */ - CHECK(rustsecp256k1_v0_9_2_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0); /* The last 32 bytes must match the expected s value. */ - CHECK(rustsecp256k1_v0_9_2_memcmp_var(sig64 + 32, expected_s_bytes, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(sig64 + 32, expected_s_bytes, 32) == 0); /* Don't retry other messages that result in the same challenge. */ e_done[e] = 1; ++e_count_done; @@ -183,28 +183,28 @@ static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_9_2_context * } } -static void test_exhaustive_schnorrsig(const rustsecp256k1_v0_9_2_context *ctx) { - rustsecp256k1_v0_9_2_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; - rustsecp256k1_v0_9_2_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1]; +static void test_exhaustive_schnorrsig(const rustsecp256k1_v0_10_0_context *ctx) { + rustsecp256k1_v0_10_0_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; + rustsecp256k1_v0_10_0_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1]; int parity[EXHAUSTIVE_TEST_ORDER - 1]; unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32]; unsigned i; /* Verify that all invalid_pubkey_bytes are actually invalid. */ for (i = 0; i < NUM_INVALID_KEYS; ++i) { - rustsecp256k1_v0_9_2_xonly_pubkey pk; - CHECK(!rustsecp256k1_v0_9_2_xonly_pubkey_parse(ctx, &pk, invalid_pubkey_bytes[i])); + rustsecp256k1_v0_10_0_xonly_pubkey pk; + CHECK(!rustsecp256k1_v0_10_0_xonly_pubkey_parse(ctx, &pk, invalid_pubkey_bytes[i])); } /* Construct keypairs and xonly-pubkeys for the entire group. 
*/ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; ++i) { - rustsecp256k1_v0_9_2_scalar scalar_i; + rustsecp256k1_v0_10_0_scalar scalar_i; unsigned char buf[32]; - rustsecp256k1_v0_9_2_scalar_set_int(&scalar_i, i); - rustsecp256k1_v0_9_2_scalar_get_b32(buf, &scalar_i); - CHECK(rustsecp256k1_v0_9_2_keypair_create(ctx, &keypair[i - 1], buf)); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parity[i - 1], &keypair[i - 1])); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1])); + rustsecp256k1_v0_10_0_scalar_set_int(&scalar_i, i); + rustsecp256k1_v0_10_0_scalar_get_b32(buf, &scalar_i); + CHECK(rustsecp256k1_v0_10_0_keypair_create(ctx, &keypair[i - 1], buf)); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parity[i - 1], &keypair[i - 1])); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1])); } test_exhaustive_schnorrsig_sign(ctx, xonly_pubkey_bytes, keypair, parity); diff --git a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h index 3452410f3..e29a1dfd4 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h @@ -15,9 +15,9 @@ static void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes, size_t msglen, size_t algolen) { unsigned char nonces[2][32]; CHECK(nonce_function_bip340(nonces[0], args[0], msglen, args[1], args[2], args[3], algolen, args[4]) == 1); - rustsecp256k1_v0_9_2_testrand_flip(args[n_flip], n_bytes); + rustsecp256k1_v0_10_0_testrand_flip(args[n_flip], n_bytes); CHECK(nonce_function_bip340(nonces[1], args[0], msglen, args[1], args[2], args[3], algolen, args[4]) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(nonces[0], nonces[1], 32) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(nonces[0], nonces[1], 32) != 0); } static void run_nonce_function_bip340_tests(void) { @@ -25,8 +25,8 @@ static void run_nonce_function_bip340_tests(void) { unsigned char aux_tag[11] = "BIP0340/aux"; unsigned char algo[13] = "BIP0340/nonce"; size_t algolen = sizeof(algo); - rustsecp256k1_v0_9_2_sha256 sha; - rustsecp256k1_v0_9_2_sha256 sha_optimized; + rustsecp256k1_v0_10_0_sha256 sha; + rustsecp256k1_v0_10_0_sha256 sha_optimized; unsigned char nonce[32], nonce_z[32]; unsigned char msg[32]; size_t msglen = sizeof(msg); @@ -37,23 +37,23 @@ static void run_nonce_function_bip340_tests(void) { int i; /* Check that hash initialized by - * rustsecp256k1_v0_9_2_nonce_function_bip340_sha256_tagged has the expected + * rustsecp256k1_v0_10_0_nonce_function_bip340_sha256_tagged has the expected * state. */ - rustsecp256k1_v0_9_2_sha256_initialize_tagged(&sha, tag, sizeof(tag)); - rustsecp256k1_v0_9_2_nonce_function_bip340_sha256_tagged(&sha_optimized); + rustsecp256k1_v0_10_0_sha256_initialize_tagged(&sha, tag, sizeof(tag)); + rustsecp256k1_v0_10_0_nonce_function_bip340_sha256_tagged(&sha_optimized); test_sha256_eq(&sha, &sha_optimized); /* Check that hash initialized by - * rustsecp256k1_v0_9_2_nonce_function_bip340_sha256_tagged_aux has the expected + * rustsecp256k1_v0_10_0_nonce_function_bip340_sha256_tagged_aux has the expected * state. 
*/ - rustsecp256k1_v0_9_2_sha256_initialize_tagged(&sha, aux_tag, sizeof(aux_tag)); - rustsecp256k1_v0_9_2_nonce_function_bip340_sha256_tagged_aux(&sha_optimized); + rustsecp256k1_v0_10_0_sha256_initialize_tagged(&sha, aux_tag, sizeof(aux_tag)); + rustsecp256k1_v0_10_0_nonce_function_bip340_sha256_tagged_aux(&sha_optimized); test_sha256_eq(&sha, &sha_optimized); - rustsecp256k1_v0_9_2_testrand256(msg); - rustsecp256k1_v0_9_2_testrand256(key); - rustsecp256k1_v0_9_2_testrand256(pk); - rustsecp256k1_v0_9_2_testrand256(aux_rand); + rustsecp256k1_v0_10_0_testrand256(msg); + rustsecp256k1_v0_10_0_testrand256(key); + rustsecp256k1_v0_10_0_testrand256(pk); + rustsecp256k1_v0_10_0_testrand256(aux_rand); /* Check that a bitflip in an argument results in different nonces. */ args[0] = msg; @@ -76,31 +76,31 @@ static void run_nonce_function_bip340_tests(void) { CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, NULL, 0, NULL) == 0); CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1); /* Other algo is fine */ - rustsecp256k1_v0_9_2_testrand_bytes_test(algo, algolen); + rustsecp256k1_v0_10_0_testrand_bytes_test(algo, algolen); CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1); for (i = 0; i < COUNT; i++) { unsigned char nonce2[32]; - uint32_t offset = rustsecp256k1_v0_9_2_testrand_int(msglen - 1); + uint32_t offset = rustsecp256k1_v0_10_0_testrand_int(msglen - 1); size_t msglen_tmp = (msglen + offset) % msglen; size_t algolen_tmp; /* Different msglen gives different nonce */ CHECK(nonce_function_bip340(nonce2, msg, msglen_tmp, key, pk, algo, algolen, NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(nonce, nonce2, 32) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(nonce, nonce2, 32) != 0); /* Different algolen gives different nonce */ - offset = rustsecp256k1_v0_9_2_testrand_int(algolen - 1); + offset = rustsecp256k1_v0_10_0_testrand_int(algolen - 1); algolen_tmp = (algolen + offset) % algolen; CHECK(nonce_function_bip340(nonce2, msg, msglen, key, pk, algo, algolen_tmp, NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(nonce, nonce2, 32) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(nonce, nonce2, 32) != 0); } /* NULL aux_rand argument is allowed, and identical to passing all zero aux_rand. 
*/ memset(aux_rand, 0, 32); CHECK(nonce_function_bip340(nonce_z, msg, msglen, key, pk, algo, algolen, &aux_rand) == 1); CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(nonce_z, nonce, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(nonce_z, nonce, 32) == 0); } static void test_schnorrsig_api(void) { @@ -108,97 +108,62 @@ static void test_schnorrsig_api(void) { unsigned char sk2[32]; unsigned char sk3[32]; unsigned char msg[32]; - rustsecp256k1_v0_9_2_keypair keypairs[3]; - rustsecp256k1_v0_9_2_keypair invalid_keypair = {{ 0 }}; - rustsecp256k1_v0_9_2_xonly_pubkey pk[3]; - rustsecp256k1_v0_9_2_xonly_pubkey zero_pk; + rustsecp256k1_v0_10_0_keypair keypairs[3]; + rustsecp256k1_v0_10_0_keypair invalid_keypair = {{ 0 }}; + rustsecp256k1_v0_10_0_xonly_pubkey pk[3]; + rustsecp256k1_v0_10_0_xonly_pubkey zero_pk; unsigned char sig[64]; - rustsecp256k1_v0_9_2_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; - rustsecp256k1_v0_9_2_schnorrsig_extraparams invalid_extraparams = {{ 0 }, NULL, NULL}; - - /** setup **/ - int ecount = 0; - - rustsecp256k1_v0_9_2_context_set_error_callback(CTX, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_9_2_context_set_error_callback(STATIC_CTX, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_9_2_context_set_illegal_callback(STATIC_CTX, counting_illegal_callback_fn, &ecount); - - rustsecp256k1_v0_9_2_testrand256(sk1); - rustsecp256k1_v0_9_2_testrand256(sk2); - rustsecp256k1_v0_9_2_testrand256(sk3); - rustsecp256k1_v0_9_2_testrand256(msg); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypairs[0], sk1) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypairs[1], sk2) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypairs[2], sk3) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &pk[0], NULL, &keypairs[0]) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &pk[1], NULL, &keypairs[1]) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &pk[2], NULL, &keypairs[2]) == 1); + rustsecp256k1_v0_10_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; + rustsecp256k1_v0_10_0_schnorrsig_extraparams invalid_extraparams = {{ 0 }, NULL, NULL}; + + rustsecp256k1_v0_10_0_testrand256(sk1); + rustsecp256k1_v0_10_0_testrand256(sk2); + rustsecp256k1_v0_10_0_testrand256(sk3); + rustsecp256k1_v0_10_0_testrand256(msg); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypairs[0], sk1) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypairs[1], sk2) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypairs[2], sk3) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &pk[0], NULL, &keypairs[0]) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &pk[1], NULL, &keypairs[1]) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &pk[2], NULL, &keypairs[2]) == 1); memset(&zero_pk, 0, sizeof(zero_pk)); /** main test body **/ - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign32(CTX, sig, msg, &keypairs[0], NULL) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign32(CTX, NULL, msg, &keypairs[0], NULL) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign32(CTX, sig, NULL, &keypairs[0], NULL) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign32(CTX, sig, msg, NULL, NULL) == 0); - CHECK(ecount 
== 3); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign32(CTX, sig, msg, &invalid_keypair, NULL) == 0); - CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign32(STATIC_CTX, sig, msg, &keypairs[0], NULL) == 0); - CHECK(ecount == 5); - - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, NULL, msg, sizeof(msg), &keypairs[0], &extraparams) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig, NULL, sizeof(msg), &keypairs[0], &extraparams) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig, NULL, 0, &keypairs[0], &extraparams) == 1); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), NULL, &extraparams) == 0); - CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &invalid_keypair, &extraparams) == 0); - CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypairs[0], NULL) == 1); - CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypairs[0], &invalid_extraparams) == 0); - CHECK(ecount == 5); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(STATIC_CTX, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 0); - CHECK(ecount == 6); - - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign32(CTX, sig, msg, &keypairs[0], NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &pk[0]) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, NULL, msg, sizeof(msg), &pk[0]) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig, NULL, sizeof(msg), &pk[0]) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig, NULL, 0, &pk[0]) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig, msg, sizeof(msg), NULL) == 0); - CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &zero_pk) == 0); - CHECK(ecount == 4); - - rustsecp256k1_v0_9_2_context_set_error_callback(STATIC_CTX, NULL, NULL); - rustsecp256k1_v0_9_2_context_set_illegal_callback(STATIC_CTX, NULL, NULL); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign32(CTX, sig, msg, &keypairs[0], NULL) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_schnorrsig_sign32(CTX, NULL, msg, &keypairs[0], NULL)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_schnorrsig_sign32(CTX, sig, NULL, &keypairs[0], NULL)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_schnorrsig_sign32(CTX, sig, msg, NULL, NULL)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_schnorrsig_sign32(CTX, sig, msg, &invalid_keypair, NULL)); + CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_10_0_schnorrsig_sign32(STATIC_CTX, sig, msg, &keypairs[0], NULL)); + + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, NULL, msg, sizeof(msg), &keypairs[0], &extraparams)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig, NULL, sizeof(msg), &keypairs[0], &extraparams)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig, NULL, 0, &keypairs[0], &extraparams) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig, 
msg, sizeof(msg), NULL, &extraparams)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &invalid_keypair, &extraparams)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypairs[0], NULL) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypairs[0], &invalid_extraparams)); + CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_10_0_schnorrsig_sign_custom(STATIC_CTX, sig, msg, sizeof(msg), &keypairs[0], &extraparams)); + + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign32(CTX, sig, msg, &keypairs[0], NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &pk[0]) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, NULL, msg, sizeof(msg), &pk[0])); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig, NULL, sizeof(msg), &pk[0])); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig, NULL, 0, &pk[0]) == 0); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig, msg, sizeof(msg), NULL)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &zero_pk)); } -/* Checks that hash initialized by rustsecp256k1_v0_9_2_schnorrsig_sha256_tagged has the +/* Checks that hash initialized by rustsecp256k1_v0_10_0_schnorrsig_sha256_tagged has the * expected state. */ static void test_schnorrsig_sha256_tagged(void) { unsigned char tag[17] = "BIP0340/challenge"; - rustsecp256k1_v0_9_2_sha256 sha; - rustsecp256k1_v0_9_2_sha256 sha_optimized; + rustsecp256k1_v0_10_0_sha256 sha; + rustsecp256k1_v0_10_0_sha256 sha_optimized; - rustsecp256k1_v0_9_2_sha256_initialize_tagged(&sha, (unsigned char *) tag, sizeof(tag)); - rustsecp256k1_v0_9_2_schnorrsig_sha256_tagged(&sha_optimized); + rustsecp256k1_v0_10_0_sha256_initialize_tagged(&sha, (unsigned char *) tag, sizeof(tag)); + rustsecp256k1_v0_10_0_schnorrsig_sha256_tagged(&sha_optimized); test_sha256_eq(&sha, &sha_optimized); } @@ -206,34 +171,34 @@ static void test_schnorrsig_sha256_tagged(void) { * Signs the message and checks that it's the same as expected_sig. 
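For reference, a hedged sketch (not from the patch) of the sign-with-aux-randomness pattern that the BIP-340 vector helpers below rely on: extraparams.ndata is consumed by the default BIP-340 nonce function as the 32-byte aux_rand input.

    static int sign_with_aux(const rustsecp256k1_v0_10_0_context *ctx,
                             unsigned char sig64[64],
                             const unsigned char *msg, size_t msglen,
                             const rustsecp256k1_v0_10_0_keypair *keypair,
                             unsigned char *aux_rand32) {
        rustsecp256k1_v0_10_0_schnorrsig_extraparams params = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT;
        params.ndata = aux_rand32; /* 32 bytes of fresh randomness, or NULL */
        return rustsecp256k1_v0_10_0_schnorrsig_sign_custom(ctx, sig64, msg, msglen, keypair, &params);
    }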
*/ static void test_schnorrsig_bip_vectors_check_signing(const unsigned char *sk, const unsigned char *pk_serialized, const unsigned char *aux_rand, const unsigned char *msg, size_t msglen, const unsigned char *expected_sig) { unsigned char sig[64]; - rustsecp256k1_v0_9_2_keypair keypair; - rustsecp256k1_v0_9_2_xonly_pubkey pk, pk_expected; + rustsecp256k1_v0_10_0_keypair keypair; + rustsecp256k1_v0_10_0_xonly_pubkey pk, pk_expected; - rustsecp256k1_v0_9_2_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; + rustsecp256k1_v0_10_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; extraparams.ndata = (unsigned char*)aux_rand; - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk)); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig, msg, msglen, &keypair, &extraparams)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(sig, expected_sig, 64) == 0); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig, msg, msglen, &keypair, &extraparams)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(sig, expected_sig, 64) == 0); if (msglen == 32) { memset(sig, 0, 64); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign32(CTX, sig, msg, &keypair, aux_rand)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(sig, expected_sig, 64) == 0); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign32(CTX, sig, msg, &keypair, aux_rand)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(sig, expected_sig, 64) == 0); } - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, &pk_expected, pk_serialized)); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &pk, NULL, &keypair)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig, msg, msglen, &pk)); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, &pk_expected, pk_serialized)); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &pk, NULL, &keypair)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig, msg, msglen, &pk)); } /* Helper function for schnorrsig_bip_vectors * Checks that both verify and verify_batch (TODO) return the same value as expected. */ static void test_schnorrsig_bip_vectors_check_verify(const unsigned char *pk_serialized, const unsigned char *msg, size_t msglen, const unsigned char *sig, int expected) { - rustsecp256k1_v0_9_2_xonly_pubkey pk; + rustsecp256k1_v0_10_0_xonly_pubkey pk; - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, &pk, pk_serialized)); - CHECK(expected == rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig, msg, msglen, &pk)); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, &pk, pk_serialized)); + CHECK(expected == rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig, msg, msglen, &pk)); } /* Test vectors according to BIP-340 ("Schnorr Signatures for secp256k1"). 
See @@ -429,9 +394,9 @@ static void test_schnorrsig_bip_vectors(void) { 0xEB, 0x98, 0x98, 0xAE, 0x79, 0xB9, 0x76, 0x87, 0x66, 0xE4, 0xFA, 0xA0, 0x4A, 0x2D, 0x4A, 0x34 }; - rustsecp256k1_v0_9_2_xonly_pubkey pk_parsed; + rustsecp256k1_v0_10_0_xonly_pubkey pk_parsed; /* No need to check the signature of the test vector as parsing the pubkey already fails */ - CHECK(!rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, &pk_parsed, pk)); + CHECK(!rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, &pk_parsed, pk)); } { /* Test vector 6 */ @@ -649,9 +614,9 @@ static void test_schnorrsig_bip_vectors(void) { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x30 }; - rustsecp256k1_v0_9_2_xonly_pubkey pk_parsed; + rustsecp256k1_v0_10_0_xonly_pubkey pk_parsed; /* No need to check the signature of the test vector as parsing the pubkey already fails */ - CHECK(!rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, &pk_parsed, pk)); + CHECK(!rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, &pk_parsed, pk)); } { /* Test vector 15 */ @@ -839,48 +804,48 @@ static int nonce_function_overflowing(unsigned char *nonce32, const unsigned cha static void test_schnorrsig_sign(void) { unsigned char sk[32]; - rustsecp256k1_v0_9_2_xonly_pubkey pk; - rustsecp256k1_v0_9_2_keypair keypair; + rustsecp256k1_v0_10_0_xonly_pubkey pk; + rustsecp256k1_v0_10_0_keypair keypair; const unsigned char msg[32] = "this is a msg for a schnorrsig.."; unsigned char sig[64]; unsigned char sig2[64]; unsigned char zeros64[64] = { 0 }; - rustsecp256k1_v0_9_2_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; + rustsecp256k1_v0_10_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; unsigned char aux_rand[32]; - rustsecp256k1_v0_9_2_testrand256(sk); - rustsecp256k1_v0_9_2_testrand256(aux_rand); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk)); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &pk, NULL, &keypair)); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign32(CTX, sig, msg, &keypair, NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &pk)); + rustsecp256k1_v0_10_0_testrand256(sk); + rustsecp256k1_v0_10_0_testrand256(aux_rand); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk)); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &pk, NULL, &keypair)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign32(CTX, sig, msg, &keypair, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &pk)); /* Check that deprecated alias gives the same result */ - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign(CTX, sig2, msg, &keypair, NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(sig, sig2, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign(CTX, sig2, msg, &keypair, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(sig, sig2, sizeof(sig)) == 0); /* Test different nonce functions */ - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &pk)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &pk)); memset(sig, 1, sizeof(sig)); extraparams.noncefp = nonce_function_failing; - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypair, &extraparams) == 0); - 
CHECK(rustsecp256k1_v0_9_2_memcmp_var(sig, zeros64, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypair, &extraparams) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(sig, zeros64, sizeof(sig)) == 0); memset(&sig, 1, sizeof(sig)); extraparams.noncefp = nonce_function_0; - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypair, &extraparams) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(sig, zeros64, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypair, &extraparams) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(sig, zeros64, sizeof(sig)) == 0); memset(&sig, 1, sizeof(sig)); extraparams.noncefp = nonce_function_overflowing; - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &pk)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &pk)); /* When using the default nonce function, schnorrsig_sign_custom produces * the same result as schnorrsig_sign with aux_rand = extraparams.ndata */ extraparams.noncefp = NULL; extraparams.ndata = aux_rand; - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign32(CTX, sig2, msg, &keypair, extraparams.ndata) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(sig, sig2, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign32(CTX, sig2, msg, &keypair, extraparams.ndata) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(sig, sig2, sizeof(sig)) == 0); } #define N_SIGS 3 @@ -892,84 +857,84 @@ static void test_schnorrsig_sign_verify(void) { unsigned char msg[N_SIGS][32]; unsigned char sig[N_SIGS][64]; size_t i; - rustsecp256k1_v0_9_2_keypair keypair; - rustsecp256k1_v0_9_2_xonly_pubkey pk; - rustsecp256k1_v0_9_2_scalar s; + rustsecp256k1_v0_10_0_keypair keypair; + rustsecp256k1_v0_10_0_xonly_pubkey pk; + rustsecp256k1_v0_10_0_scalar s; - rustsecp256k1_v0_9_2_testrand256(sk); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk)); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &pk, NULL, &keypair)); + rustsecp256k1_v0_10_0_testrand256(sk); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk)); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &pk, NULL, &keypair)); for (i = 0; i < N_SIGS; i++) { - rustsecp256k1_v0_9_2_testrand256(msg[i]); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign32(CTX, sig[i], msg[i], &keypair, NULL)); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig[i], msg[i], sizeof(msg[i]), &pk)); + rustsecp256k1_v0_10_0_testrand256(msg[i]); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign32(CTX, sig[i], msg[i], &keypair, NULL)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig[i], msg[i], sizeof(msg[i]), &pk)); } { /* Flip a few bits in the signature and in the message and check that * verify and verify_batch (TODO) fail */ - size_t sig_idx = rustsecp256k1_v0_9_2_testrand_int(N_SIGS); - size_t byte_idx = rustsecp256k1_v0_9_2_testrand_bits(5); - unsigned char xorbyte = rustsecp256k1_v0_9_2_testrand_int(254)+1; + size_t sig_idx = rustsecp256k1_v0_10_0_testrand_int(N_SIGS); + size_t byte_idx 
= rustsecp256k1_v0_10_0_testrand_bits(5); + unsigned char xorbyte = rustsecp256k1_v0_10_0_testrand_int(254)+1; sig[sig_idx][byte_idx] ^= xorbyte; - CHECK(!rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); + CHECK(!rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); sig[sig_idx][byte_idx] ^= xorbyte; - byte_idx = rustsecp256k1_v0_9_2_testrand_bits(5); + byte_idx = rustsecp256k1_v0_10_0_testrand_bits(5); sig[sig_idx][32+byte_idx] ^= xorbyte; - CHECK(!rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); + CHECK(!rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); sig[sig_idx][32+byte_idx] ^= xorbyte; - byte_idx = rustsecp256k1_v0_9_2_testrand_bits(5); + byte_idx = rustsecp256k1_v0_10_0_testrand_bits(5); msg[sig_idx][byte_idx] ^= xorbyte; - CHECK(!rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); + CHECK(!rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); msg[sig_idx][byte_idx] ^= xorbyte; /* Check that above bitflips have been reversed correctly */ - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); } /* Test overflowing s */ - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign32(CTX, sig[0], msg[0], &keypair, NULL)); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig[0], msg[0], sizeof(msg[0]), &pk)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign32(CTX, sig[0], msg[0], &keypair, NULL)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig[0], msg[0], sizeof(msg[0]), &pk)); memset(&sig[0][32], 0xFF, 32); - CHECK(!rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig[0], msg[0], sizeof(msg[0]), &pk)); + CHECK(!rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig[0], msg[0], sizeof(msg[0]), &pk)); /* Test negative s */ - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign32(CTX, sig[0], msg[0], &keypair, NULL)); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig[0], msg[0], sizeof(msg[0]), &pk)); - rustsecp256k1_v0_9_2_scalar_set_b32(&s, &sig[0][32], NULL); - rustsecp256k1_v0_9_2_scalar_negate(&s, &s); - rustsecp256k1_v0_9_2_scalar_get_b32(&sig[0][32], &s); - CHECK(!rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig[0], msg[0], sizeof(msg[0]), &pk)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign32(CTX, sig[0], msg[0], &keypair, NULL)); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig[0], msg[0], sizeof(msg[0]), &pk)); + rustsecp256k1_v0_10_0_scalar_set_b32(&s, &sig[0][32], NULL); + rustsecp256k1_v0_10_0_scalar_negate(&s, &s); + rustsecp256k1_v0_10_0_scalar_get_b32(&sig[0][32], &s); + CHECK(!rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig[0], msg[0], sizeof(msg[0]), &pk)); /* The empty message can be signed & verified */ - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig[0], NULL, 0, &keypair, NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig[0], NULL, 0, &pk) == 1); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig[0], NULL, 0, &keypair, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig[0], NULL, 0, &pk) == 1); { /* Test varying message lengths */ unsigned char msg_large[32 * 8]; - uint32_t msglen = rustsecp256k1_v0_9_2_testrand_int(sizeof(msg_large)); + uint32_t msglen = 
rustsecp256k1_v0_10_0_testrand_int(sizeof(msg_large)); for (i = 0; i < sizeof(msg_large); i += 32) { - rustsecp256k1_v0_9_2_testrand256(&msg_large[i]); + rustsecp256k1_v0_10_0_testrand256(&msg_large[i]); } - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign_custom(CTX, sig[0], msg_large, msglen, &keypair, NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig[0], msg_large, msglen, &pk) == 1); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign_custom(CTX, sig[0], msg_large, msglen, &keypair, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig[0], msg_large, msglen, &pk) == 1); /* Verification for a random wrong message length fails */ msglen = (msglen + (sizeof(msg_large) - 1)) % sizeof(msg_large); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig[0], msg_large, msglen, &pk) == 0); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig[0], msg_large, msglen, &pk) == 0); } } #undef N_SIGS static void test_schnorrsig_taproot(void) { unsigned char sk[32]; - rustsecp256k1_v0_9_2_keypair keypair; - rustsecp256k1_v0_9_2_xonly_pubkey internal_pk; + rustsecp256k1_v0_10_0_keypair keypair; + rustsecp256k1_v0_10_0_xonly_pubkey internal_pk; unsigned char internal_pk_bytes[32]; - rustsecp256k1_v0_9_2_xonly_pubkey output_pk; + rustsecp256k1_v0_10_0_xonly_pubkey output_pk; unsigned char output_pk_bytes[32]; unsigned char tweak[32]; int pk_parity; @@ -977,27 +942,27 @@ static void test_schnorrsig_taproot(void) { unsigned char sig[64]; /* Create output key */ - rustsecp256k1_v0_9_2_testrand256(sk); - CHECK(rustsecp256k1_v0_9_2_keypair_create(CTX, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &internal_pk, NULL, &keypair) == 1); + rustsecp256k1_v0_10_0_testrand256(sk); + CHECK(rustsecp256k1_v0_10_0_keypair_create(CTX, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &internal_pk, NULL, &keypair) == 1); /* In actual taproot the tweak would be hash of internal_pk */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(CTX, tweak, &internal_pk) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 1); - CHECK(rustsecp256k1_v0_9_2_keypair_xonly_pub(CTX, &output_pk, &pk_parity, &keypair) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(CTX, output_pk_bytes, &output_pk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(CTX, tweak, &internal_pk) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 1); + CHECK(rustsecp256k1_v0_10_0_keypair_xonly_pub(CTX, &output_pk, &pk_parity, &keypair) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(CTX, output_pk_bytes, &output_pk) == 1); /* Key spend */ - rustsecp256k1_v0_9_2_testrand256(msg); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_sign32(CTX, sig, msg, &keypair, NULL) == 1); + rustsecp256k1_v0_10_0_testrand256(msg); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_sign32(CTX, sig, msg, &keypair, NULL) == 1); /* Verify key spend */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, &output_pk, output_pk_bytes) == 1); - CHECK(rustsecp256k1_v0_9_2_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &output_pk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, &output_pk, output_pk_bytes) == 1); + CHECK(rustsecp256k1_v0_10_0_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &output_pk) == 1); /* Script spend */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_serialize(CTX, internal_pk_bytes, &internal_pk) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_serialize(CTX, internal_pk_bytes, &internal_pk) == 1); /* Verify 
script spend */ - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_parse(CTX, &internal_pk, internal_pk_bytes) == 1); - CHECK(rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check(CTX, output_pk_bytes, pk_parity, &internal_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_parse(CTX, &internal_pk, internal_pk_bytes) == 1); + CHECK(rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check(CTX, output_pk_bytes, pk_parity, &internal_pk, tweak) == 1); } static void run_schnorrsig_tests(void) { diff --git a/secp256k1-sys/depend/secp256k1/src/precompute_ecmult.c b/secp256k1-sys/depend/secp256k1/src/precompute_ecmult.c index 6984acdd4..b460cbe51 100644 --- a/secp256k1-sys/depend/secp256k1/src/precompute_ecmult.c +++ b/secp256k1-sys/depend/secp256k1/src/precompute_ecmult.c @@ -18,11 +18,11 @@ #include "ecmult.h" #include "ecmult_compute_table_impl.h" -static void print_table(FILE *fp, const char *name, int window_g, const rustsecp256k1_v0_9_2_ge_storage* table) { +static void print_table(FILE *fp, const char *name, int window_g, const rustsecp256k1_v0_10_0_ge_storage* table) { int j; int i; - fprintf(fp, "const rustsecp256k1_v0_9_2_ge_storage %s[ECMULT_TABLE_SIZE(WINDOW_G)] = {\n", name); + fprintf(fp, "const rustsecp256k1_v0_10_0_ge_storage %s[ECMULT_TABLE_SIZE(WINDOW_G)] = {\n", name); fprintf(fp, " S(%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32 ",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32")\n", SECP256K1_GE_STORAGE_CONST_GET(table[0])); @@ -41,13 +41,13 @@ static void print_table(FILE *fp, const char *name, int window_g, const rustsecp } static void print_two_tables(FILE *fp, int window_g) { - rustsecp256k1_v0_9_2_ge_storage* table = malloc(ECMULT_TABLE_SIZE(window_g) * sizeof(rustsecp256k1_v0_9_2_ge_storage)); - rustsecp256k1_v0_9_2_ge_storage* table_128 = malloc(ECMULT_TABLE_SIZE(window_g) * sizeof(rustsecp256k1_v0_9_2_ge_storage)); + rustsecp256k1_v0_10_0_ge_storage* table = malloc(ECMULT_TABLE_SIZE(window_g) * sizeof(rustsecp256k1_v0_10_0_ge_storage)); + rustsecp256k1_v0_10_0_ge_storage* table_128 = malloc(ECMULT_TABLE_SIZE(window_g) * sizeof(rustsecp256k1_v0_10_0_ge_storage)); - rustsecp256k1_v0_9_2_ecmult_compute_two_tables(table, table_128, window_g, &rustsecp256k1_v0_9_2_ge_const_g); + rustsecp256k1_v0_10_0_ecmult_compute_two_tables(table, table_128, window_g, &rustsecp256k1_v0_10_0_ge_const_g); - print_table(fp, "rustsecp256k1_v0_9_2_pre_g", window_g, table); - print_table(fp, "rustsecp256k1_v0_9_2_pre_g_128", window_g, table_128); + print_table(fp, "rustsecp256k1_v0_10_0_pre_g", window_g, table); + print_table(fp, "rustsecp256k1_v0_10_0_pre_g_128", window_g, table_128); free(table); free(table_128); @@ -66,8 +66,8 @@ int main(void) { } fprintf(fp, "/* This file was automatically generated by precompute_ecmult. 
*/\n"); - fprintf(fp, "/* This file contains an array rustsecp256k1_v0_9_2_pre_g with odd multiples of the base point G and\n"); - fprintf(fp, " * an array rustsecp256k1_v0_9_2_pre_g_128 with odd multiples of 2^128*G for accelerating the computation of a*P + b*G.\n"); + fprintf(fp, "/* This file contains an array rustsecp256k1_v0_10_0_pre_g with odd multiples of the base point G and\n"); + fprintf(fp, " * an array rustsecp256k1_v0_10_0_pre_g_128 with odd multiples of 2^128*G for accelerating the computation of a*P + b*G.\n"); fprintf(fp, " */\n"); fprintf(fp, "#include \"group.h\"\n"); fprintf(fp, "#include \"ecmult.h\"\n"); diff --git a/secp256k1-sys/depend/secp256k1/src/precompute_ecmult_gen.c b/secp256k1-sys/depend/secp256k1/src/precompute_ecmult_gen.c index 5f09abbe8..05097c7ae 100644 --- a/secp256k1-sys/depend/secp256k1/src/precompute_ecmult_gen.c +++ b/secp256k1-sys/depend/secp256k1/src/precompute_ecmult_gen.c @@ -40,15 +40,15 @@ int main(int argc, char **argv) { fprintf(fp, "# error Cannot compile precomputed_ecmult_gen.c in exhaustive test mode\n"); fprintf(fp, "#endif /* EXHAUSTIVE_TEST_ORDER */\n"); fprintf(fp, "#define S(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) SECP256K1_GE_STORAGE_CONST(0x##a##u,0x##b##u,0x##c##u,0x##d##u,0x##e##u,0x##f##u,0x##g##u,0x##h##u,0x##i##u,0x##j##u,0x##k##u,0x##l##u,0x##m##u,0x##n##u,0x##o##u,0x##p##u)\n"); - fprintf(fp, "const rustsecp256k1_v0_9_2_ge_storage rustsecp256k1_v0_9_2_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)] = {\n"); + fprintf(fp, "const rustsecp256k1_v0_10_0_ge_storage rustsecp256k1_v0_10_0_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)] = {\n"); for (bits = 2; bits <= 8; bits *= 2) { int g = ECMULT_GEN_PREC_G(bits); int n = ECMULT_GEN_PREC_N(bits); int inner, outer; - rustsecp256k1_v0_9_2_ge_storage* table = checked_malloc(&default_error_callback, n * g * sizeof(rustsecp256k1_v0_9_2_ge_storage)); - rustsecp256k1_v0_9_2_ecmult_gen_compute_table(table, &rustsecp256k1_v0_9_2_ge_const_g, bits); + rustsecp256k1_v0_10_0_ge_storage* table = checked_malloc(&default_error_callback, n * g * sizeof(rustsecp256k1_v0_10_0_ge_storage)); + rustsecp256k1_v0_10_0_ecmult_gen_compute_table(table, &rustsecp256k1_v0_10_0_ge_const_g, bits); fprintf(fp, "#if ECMULT_GEN_PREC_BITS == %d\n", bits); for(outer = 0; outer != n; outer++) { diff --git a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.c b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.c index c8cabb896..9dd2ca25d 100644 --- a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.c +++ b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.c @@ -1,6 +1,6 @@ /* This file was automatically generated by precompute_ecmult. */ -/* This file contains an array rustsecp256k1_v0_9_2_pre_g with odd multiples of the base point G and - * an array rustsecp256k1_v0_9_2_pre_g_128 with odd multiples of 2^128*G for accelerating the computation of a*P + b*G. +/* This file contains an array rustsecp256k1_v0_10_0_pre_g with odd multiples of the base point G and + * an array rustsecp256k1_v0_10_0_pre_g_128 with odd multiples of 2^128*G for accelerating the computation of a*P + b*G. 
*/ #include "group.h" #include "ecmult.h" @@ -13,7 +13,7 @@ # error Cannot compile precomputed_ecmult.c in exhaustive test mode #endif /* EXHAUSTIVE_TEST_ORDER */ #define WINDOW_G ECMULT_WINDOW_SIZE -const rustsecp256k1_v0_9_2_ge_storage rustsecp256k1_v0_9_2_pre_g[ECMULT_TABLE_SIZE(WINDOW_G)] = { +const rustsecp256k1_v0_10_0_ge_storage rustsecp256k1_v0_10_0_pre_g[ECMULT_TABLE_SIZE(WINDOW_G)] = { S(79be667e,f9dcbbac,55a06295,ce870b07,29bfcdb,2dce28d9,59f2815b,16f81798,483ada77,26a3c465,5da4fbfc,e1108a8,fd17b448,a6855419,9c47d08f,fb10d4b8) #if WINDOW_G > 2 ,S(f9308a01,9258c310,49344f85,f89d5229,b531c845,836f99b0,8601f113,bce036f9,388f7b0f,632de814,fe337e6,2a37f356,6500a999,34c2231b,6cb9fd75,84b8e672) @@ -8233,7 +8233,7 @@ const rustsecp256k1_v0_9_2_ge_storage rustsecp256k1_v0_9_2_pre_g[ECMULT_TABLE_SI ,S(1e70619c,381a6adc,e5d925e0,c9c74f97,3c02ff64,ff2662d7,34efc485,d2bce895,c923f771,f543ffed,42935c28,8474aaaf,80a46ad4,3c579ce0,bb5e663d,668b24b3) #endif }; -const rustsecp256k1_v0_9_2_ge_storage rustsecp256k1_v0_9_2_pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)] = { +const rustsecp256k1_v0_10_0_ge_storage rustsecp256k1_v0_10_0_pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)] = { S(8f68b9d2,f63b5f33,9239c1ad,981f162e,e88c5678,723ea335,1b7b444c,9ec4c0da,662a9f2d,ba063986,de1d90c2,b6be215d,bbea2cfe,95510bfd,f23cbf79,501fff82) #if WINDOW_G > 2 ,S(38381dbe,2e509f22,8ba93363,f2451f08,fd845cb3,51d954be,18e2b8ed,d23809fa,e4a32d0a,fb917dc,b09405a5,520eb1cc,3681fccb,32d8f24d,bd707518,331fed52) diff --git a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.h b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.h index 3fa91540f..6da106e8a 100644 --- a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.h +++ b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.h @@ -23,12 +23,12 @@ extern "C" { # else # error No known generator for the specified exhaustive test group order. 
# endif -static rustsecp256k1_v0_9_2_ge_storage rustsecp256k1_v0_9_2_pre_g[ECMULT_TABLE_SIZE(WINDOW_G)]; -static rustsecp256k1_v0_9_2_ge_storage rustsecp256k1_v0_9_2_pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)]; +static rustsecp256k1_v0_10_0_ge_storage rustsecp256k1_v0_10_0_pre_g[ECMULT_TABLE_SIZE(WINDOW_G)]; +static rustsecp256k1_v0_10_0_ge_storage rustsecp256k1_v0_10_0_pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)]; #else /* !defined(EXHAUSTIVE_TEST_ORDER) */ # define WINDOW_G ECMULT_WINDOW_SIZE -extern const rustsecp256k1_v0_9_2_ge_storage rustsecp256k1_v0_9_2_pre_g[ECMULT_TABLE_SIZE(WINDOW_G)]; -extern const rustsecp256k1_v0_9_2_ge_storage rustsecp256k1_v0_9_2_pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)]; +extern const rustsecp256k1_v0_10_0_ge_storage rustsecp256k1_v0_10_0_pre_g[ECMULT_TABLE_SIZE(WINDOW_G)]; +extern const rustsecp256k1_v0_10_0_ge_storage rustsecp256k1_v0_10_0_pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)]; #endif /* defined(EXHAUSTIVE_TEST_ORDER) */ #ifdef __cplusplus diff --git a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.c b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.c index 0fcab9d33..9e2f33958 100644 --- a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.c +++ b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.c @@ -7,7 +7,7 @@ # error Cannot compile precomputed_ecmult_gen.c in exhaustive test mode #endif /* EXHAUSTIVE_TEST_ORDER */ #define S(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) SECP256K1_GE_STORAGE_CONST(0x##a##u,0x##b##u,0x##c##u,0x##d##u,0x##e##u,0x##f##u,0x##g##u,0x##h##u,0x##i##u,0x##j##u,0x##k##u,0x##l##u,0x##m##u,0x##n##u,0x##o##u,0x##p##u) -const rustsecp256k1_v0_9_2_ge_storage rustsecp256k1_v0_9_2_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)] = { +const rustsecp256k1_v0_10_0_ge_storage rustsecp256k1_v0_10_0_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)] = { #if ECMULT_GEN_PREC_BITS == 2 {S(3a9ed373,6eed3eec,9aeb5ac0,21b54652,56817b1f,8de6cd0,fbcee548,ba044bb5,7bcc5928,bdc9c023,dfc663b8,9e4f6969,ab751798,8e600ec1,d242010c,45c7974a), S(e44d7675,c3cb2857,4e133c01,a74f4afc,5ce684f8,4a789711,603f7c4f,50abef58,25bcb62f,fe2e2ce2,196ad86c,a006e20,8c64d21b,b25320a3,b5574b9c,1e1bfb4b), diff --git a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.h b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.h index 1aaa73ded..37094a19b 100644 --- a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.h +++ b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.h @@ -14,9 +14,9 @@ extern "C" { #include "group.h" #include "ecmult_gen.h" #ifdef EXHAUSTIVE_TEST_ORDER -static rustsecp256k1_v0_9_2_ge_storage rustsecp256k1_v0_9_2_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)]; +static rustsecp256k1_v0_10_0_ge_storage rustsecp256k1_v0_10_0_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)]; #else -extern const rustsecp256k1_v0_9_2_ge_storage rustsecp256k1_v0_9_2_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)]; +extern const rustsecp256k1_v0_10_0_ge_storage rustsecp256k1_v0_10_0_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)]; #endif /* defined(EXHAUSTIVE_TEST_ORDER) */ #ifdef __cplusplus diff --git a/secp256k1-sys/depend/secp256k1/src/scalar.h b/secp256k1-sys/depend/secp256k1/src/scalar.h index 
a9d7fd5c1..28c01d6c1 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar.h @@ -20,86 +20,86 @@ #endif /** Clear a scalar to prevent the leak of sensitive data. */ -static void rustsecp256k1_v0_9_2_scalar_clear(rustsecp256k1_v0_9_2_scalar *r); +static void rustsecp256k1_v0_10_0_scalar_clear(rustsecp256k1_v0_10_0_scalar *r); /** Access bits from a scalar. All requested bits must belong to the same 32-bit limb. */ -static unsigned int rustsecp256k1_v0_9_2_scalar_get_bits(const rustsecp256k1_v0_9_2_scalar *a, unsigned int offset, unsigned int count); +static unsigned int rustsecp256k1_v0_10_0_scalar_get_bits(const rustsecp256k1_v0_10_0_scalar *a, unsigned int offset, unsigned int count); -/** Access bits from a scalar. Not constant time. */ -static unsigned int rustsecp256k1_v0_9_2_scalar_get_bits_var(const rustsecp256k1_v0_9_2_scalar *a, unsigned int offset, unsigned int count); +/** Access bits from a scalar. Not constant time in offset and count. */ +static unsigned int rustsecp256k1_v0_10_0_scalar_get_bits_var(const rustsecp256k1_v0_10_0_scalar *a, unsigned int offset, unsigned int count); /** Set a scalar from a big endian byte array. The scalar will be reduced modulo group order `n`. * In: bin: pointer to a 32-byte array. * Out: r: scalar to be set. * overflow: non-zero if the scalar was bigger or equal to `n` before reduction, zero otherwise (can be NULL). */ -static void rustsecp256k1_v0_9_2_scalar_set_b32(rustsecp256k1_v0_9_2_scalar *r, const unsigned char *bin, int *overflow); +static void rustsecp256k1_v0_10_0_scalar_set_b32(rustsecp256k1_v0_10_0_scalar *r, const unsigned char *bin, int *overflow); /** Set a scalar from a big endian byte array and returns 1 if it is a valid * seckey and 0 otherwise. */ -static int rustsecp256k1_v0_9_2_scalar_set_b32_seckey(rustsecp256k1_v0_9_2_scalar *r, const unsigned char *bin); +static int rustsecp256k1_v0_10_0_scalar_set_b32_seckey(rustsecp256k1_v0_10_0_scalar *r, const unsigned char *bin); /** Set a scalar to an unsigned integer. */ -static void rustsecp256k1_v0_9_2_scalar_set_int(rustsecp256k1_v0_9_2_scalar *r, unsigned int v); +static void rustsecp256k1_v0_10_0_scalar_set_int(rustsecp256k1_v0_10_0_scalar *r, unsigned int v); /** Convert a scalar to a byte array. */ -static void rustsecp256k1_v0_9_2_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_9_2_scalar* a); +static void rustsecp256k1_v0_10_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_10_0_scalar* a); /** Add two scalars together (modulo the group order). Returns whether it overflowed. */ -static int rustsecp256k1_v0_9_2_scalar_add(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b); +static int rustsecp256k1_v0_10_0_scalar_add(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b); /** Conditionally add a power of two to a scalar. The result is not allowed to overflow. */ -static void rustsecp256k1_v0_9_2_scalar_cadd_bit(rustsecp256k1_v0_9_2_scalar *r, unsigned int bit, int flag); +static void rustsecp256k1_v0_10_0_scalar_cadd_bit(rustsecp256k1_v0_10_0_scalar *r, unsigned int bit, int flag); /** Multiply two scalars (modulo the group order). 
*/ -static void rustsecp256k1_v0_9_2_scalar_mul(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b); - -/** Shift a scalar right by some amount strictly between 0 and 16, returning - * the low bits that were shifted off */ -static int rustsecp256k1_v0_9_2_scalar_shr_int(rustsecp256k1_v0_9_2_scalar *r, int n); +static void rustsecp256k1_v0_10_0_scalar_mul(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b); /** Compute the inverse of a scalar (modulo the group order). */ -static void rustsecp256k1_v0_9_2_scalar_inverse(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a); +static void rustsecp256k1_v0_10_0_scalar_inverse(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a); /** Compute the inverse of a scalar (modulo the group order), without constant-time guarantee. */ -static void rustsecp256k1_v0_9_2_scalar_inverse_var(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a); +static void rustsecp256k1_v0_10_0_scalar_inverse_var(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a); /** Compute the complement of a scalar (modulo the group order). */ -static void rustsecp256k1_v0_9_2_scalar_negate(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a); +static void rustsecp256k1_v0_10_0_scalar_negate(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a); + +/** Multiply a scalar with the multiplicative inverse of 2. */ +static void rustsecp256k1_v0_10_0_scalar_half(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a); /** Check whether a scalar equals zero. */ -static int rustsecp256k1_v0_9_2_scalar_is_zero(const rustsecp256k1_v0_9_2_scalar *a); +static int rustsecp256k1_v0_10_0_scalar_is_zero(const rustsecp256k1_v0_10_0_scalar *a); /** Check whether a scalar equals one. */ -static int rustsecp256k1_v0_9_2_scalar_is_one(const rustsecp256k1_v0_9_2_scalar *a); +static int rustsecp256k1_v0_10_0_scalar_is_one(const rustsecp256k1_v0_10_0_scalar *a); /** Check whether a scalar, considered as a nonnegative integer, is even. */ -static int rustsecp256k1_v0_9_2_scalar_is_even(const rustsecp256k1_v0_9_2_scalar *a); +static int rustsecp256k1_v0_10_0_scalar_is_even(const rustsecp256k1_v0_10_0_scalar *a); /** Check whether a scalar is higher than the group order divided by 2. */ -static int rustsecp256k1_v0_9_2_scalar_is_high(const rustsecp256k1_v0_9_2_scalar *a); +static int rustsecp256k1_v0_10_0_scalar_is_high(const rustsecp256k1_v0_10_0_scalar *a); /** Conditionally negate a number, in constant time. * Returns -1 if the number was negated, 1 otherwise */ -static int rustsecp256k1_v0_9_2_scalar_cond_negate(rustsecp256k1_v0_9_2_scalar *a, int flag); +static int rustsecp256k1_v0_10_0_scalar_cond_negate(rustsecp256k1_v0_10_0_scalar *a, int flag); /** Compare two scalars. */ -static int rustsecp256k1_v0_9_2_scalar_eq(const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b); +static int rustsecp256k1_v0_10_0_scalar_eq(const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b); /** Find r1 and r2 such that r1+r2*2^128 = k. */ -static void rustsecp256k1_v0_9_2_scalar_split_128(rustsecp256k1_v0_9_2_scalar *r1, rustsecp256k1_v0_9_2_scalar *r2, const rustsecp256k1_v0_9_2_scalar *k); +static void rustsecp256k1_v0_10_0_scalar_split_128(rustsecp256k1_v0_10_0_scalar *r1, rustsecp256k1_v0_10_0_scalar *r2, const rustsecp256k1_v0_10_0_scalar *k); /** Find r1 and r2 such that r1+r2*lambda = k, where r1 and r2 or their - * negations are maximum 128 bits long (see rustsecp256k1_v0_9_2_ge_mul_lambda). It is + * negations are maximum 128 bits long (see rustsecp256k1_v0_10_0_ge_mul_lambda). It is * required that r1, r2, and k all point to different objects. */ -static void rustsecp256k1_v0_9_2_scalar_split_lambda(rustsecp256k1_v0_9_2_scalar * SECP256K1_RESTRICT r1, rustsecp256k1_v0_9_2_scalar * SECP256K1_RESTRICT r2, const rustsecp256k1_v0_9_2_scalar * SECP256K1_RESTRICT k); +static void rustsecp256k1_v0_10_0_scalar_split_lambda(rustsecp256k1_v0_10_0_scalar * SECP256K1_RESTRICT r1, rustsecp256k1_v0_10_0_scalar * SECP256K1_RESTRICT r2, const rustsecp256k1_v0_10_0_scalar * SECP256K1_RESTRICT k);
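The two split declarations above are the scalar side of the GLV endomorphism optimization. Restating their doc comments as one formula, in the header's own notation (the endomorphism remark is standard secp256k1 background, not something this header spells out):

    k = r1 + r2*2^128 (mod n)    resp.    k = r1 + r2*lambda (mod n),  |r1|, |r2| at most ~2^128,

so that k*P can be evaluated as the double multiplication r1*P + r2*(lambda*P) over half-length scalars; lambda*P is cheap to obtain because lambda corresponds to an efficiently computable curve endomorphism (the rustsecp256k1_v0_10_0_ge_mul_lambda referenced above).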
/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */ -static void rustsecp256k1_v0_9_2_scalar_mul_shift_var(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b, unsigned int shift); +static void rustsecp256k1_v0_10_0_scalar_mul_shift_var(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b, unsigned int shift); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/ -static void rustsecp256k1_v0_9_2_scalar_cmov(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, int flag); +static void rustsecp256k1_v0_10_0_scalar_cmov(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, int flag); /** Check invariants on a scalar (no-op unless VERIFY is enabled). */ -static void rustsecp256k1_v0_9_2_scalar_verify(const rustsecp256k1_v0_9_2_scalar *r); +static void rustsecp256k1_v0_10_0_scalar_verify(const rustsecp256k1_v0_10_0_scalar *r); +#define SECP256K1_SCALAR_VERIFY(r) rustsecp256k1_v0_10_0_scalar_verify(r) #endif /* SECP256K1_SCALAR_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h b/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h index c2ca481ec..cee6d188b 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h @@ -12,7 +12,7 @@ /** A scalar modulo the group order of the secp256k1 curve. 
*/ typedef struct { uint64_t d[4]; -} rustsecp256k1_v0_9_2_scalar; +} rustsecp256k1_v0_10_0_scalar; #define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{((uint64_t)(d1)) << 32 | (d0), ((uint64_t)(d3)) << 32 | (d2), ((uint64_t)(d5)) << 32 | (d4), ((uint64_t)(d7)) << 32 | (d6)}} diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h index 7566e496a..c6eba307f 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h @@ -29,43 +29,43 @@ #define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL) #define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL) -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_scalar_clear(rustsecp256k1_v0_9_2_scalar *r) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_scalar_clear(rustsecp256k1_v0_10_0_scalar *r) { r->d[0] = 0; r->d[1] = 0; r->d[2] = 0; r->d[3] = 0; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_scalar_set_int(rustsecp256k1_v0_9_2_scalar *r, unsigned int v) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_scalar_set_int(rustsecp256k1_v0_10_0_scalar *r, unsigned int v) { r->d[0] = v; r->d[1] = 0; r->d[2] = 0; r->d[3] = 0; - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_9_2_scalar_get_bits(const rustsecp256k1_v0_9_2_scalar *a, unsigned int offset, unsigned int count) { - rustsecp256k1_v0_9_2_scalar_verify(a); +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_10_0_scalar_get_bits(const rustsecp256k1_v0_10_0_scalar *a, unsigned int offset, unsigned int count) { + SECP256K1_SCALAR_VERIFY(a); VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6); return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1); } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_9_2_scalar_get_bits_var(const rustsecp256k1_v0_9_2_scalar *a, unsigned int offset, unsigned int count) { - rustsecp256k1_v0_9_2_scalar_verify(a); +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_10_0_scalar_get_bits_var(const rustsecp256k1_v0_10_0_scalar *a, unsigned int offset, unsigned int count) { + SECP256K1_SCALAR_VERIFY(a); VERIFY_CHECK(count < 32); VERIFY_CHECK(offset + count <= 256); if ((offset + count - 1) >> 6 == offset >> 6) { - return rustsecp256k1_v0_9_2_scalar_get_bits(a, offset, count); + return rustsecp256k1_v0_10_0_scalar_get_bits(a, offset, count); } else { VERIFY_CHECK((offset >> 6) + 1 < 4); return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1); } } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_check_overflow(const rustsecp256k1_v0_9_2_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_check_overflow(const rustsecp256k1_v0_10_0_scalar *a) { int yes = 0; int no = 0; no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. 
*/ @@ -77,138 +77,177 @@ SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_check_overflow(const rus return yes; } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_reduce(rustsecp256k1_v0_9_2_scalar *r, unsigned int overflow) { - rustsecp256k1_v0_9_2_uint128 t; +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_reduce(rustsecp256k1_v0_10_0_scalar *r, unsigned int overflow) { + rustsecp256k1_v0_10_0_uint128 t; VERIFY_CHECK(overflow <= 1); - rustsecp256k1_v0_9_2_u128_from_u64(&t, r->d[0]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, overflow * SECP256K1_N_C_0); - r->d[0] = rustsecp256k1_v0_9_2_u128_to_u64(&t); rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, r->d[1]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, overflow * SECP256K1_N_C_1); - r->d[1] = rustsecp256k1_v0_9_2_u128_to_u64(&t); rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, r->d[2]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, overflow * SECP256K1_N_C_2); - r->d[2] = rustsecp256k1_v0_9_2_u128_to_u64(&t); rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, r->d[3]); - r->d[3] = rustsecp256k1_v0_9_2_u128_to_u64(&t); - - rustsecp256k1_v0_9_2_scalar_verify(r); + rustsecp256k1_v0_10_0_u128_from_u64(&t, r->d[0]); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, overflow * SECP256K1_N_C_0); + r->d[0] = rustsecp256k1_v0_10_0_u128_to_u64(&t); rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, r->d[1]); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, overflow * SECP256K1_N_C_1); + r->d[1] = rustsecp256k1_v0_10_0_u128_to_u64(&t); rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, r->d[2]); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, overflow * SECP256K1_N_C_2); + r->d[2] = rustsecp256k1_v0_10_0_u128_to_u64(&t); rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, r->d[3]); + r->d[3] = rustsecp256k1_v0_10_0_u128_to_u64(&t); + + SECP256K1_SCALAR_VERIFY(r); return overflow; } -static int rustsecp256k1_v0_9_2_scalar_add(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b) { +static int rustsecp256k1_v0_10_0_scalar_add(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b) { int overflow; - rustsecp256k1_v0_9_2_uint128 t; - rustsecp256k1_v0_9_2_scalar_verify(a); - rustsecp256k1_v0_9_2_scalar_verify(b); - - rustsecp256k1_v0_9_2_u128_from_u64(&t, a->d[0]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, b->d[0]); - r->d[0] = rustsecp256k1_v0_9_2_u128_to_u64(&t); rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, a->d[1]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, b->d[1]); - r->d[1] = rustsecp256k1_v0_9_2_u128_to_u64(&t); rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, a->d[2]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, b->d[2]); - r->d[2] = rustsecp256k1_v0_9_2_u128_to_u64(&t); rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, a->d[3]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, b->d[3]); - r->d[3] = rustsecp256k1_v0_9_2_u128_to_u64(&t); rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - overflow = rustsecp256k1_v0_9_2_u128_to_u64(&t) + rustsecp256k1_v0_9_2_scalar_check_overflow(r); + rustsecp256k1_v0_10_0_uint128 t; + SECP256K1_SCALAR_VERIFY(a); + SECP256K1_SCALAR_VERIFY(b); + + rustsecp256k1_v0_10_0_u128_from_u64(&t, a->d[0]); + 
rustsecp256k1_v0_10_0_u128_accum_u64(&t, b->d[0]); + r->d[0] = rustsecp256k1_v0_10_0_u128_to_u64(&t); rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, a->d[1]); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, b->d[1]); + r->d[1] = rustsecp256k1_v0_10_0_u128_to_u64(&t); rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, a->d[2]); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, b->d[2]); + r->d[2] = rustsecp256k1_v0_10_0_u128_to_u64(&t); rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, a->d[3]); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, b->d[3]); + r->d[3] = rustsecp256k1_v0_10_0_u128_to_u64(&t); rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + overflow = rustsecp256k1_v0_10_0_u128_to_u64(&t) + rustsecp256k1_v0_10_0_scalar_check_overflow(r); VERIFY_CHECK(overflow == 0 || overflow == 1); - rustsecp256k1_v0_9_2_scalar_reduce(r, overflow); + rustsecp256k1_v0_10_0_scalar_reduce(r, overflow); - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); return overflow; } -static void rustsecp256k1_v0_9_2_scalar_cadd_bit(rustsecp256k1_v0_9_2_scalar *r, unsigned int bit, int flag) { - rustsecp256k1_v0_9_2_uint128 t; +static void rustsecp256k1_v0_10_0_scalar_cadd_bit(rustsecp256k1_v0_10_0_scalar *r, unsigned int bit, int flag) { + rustsecp256k1_v0_10_0_uint128 t; volatile int vflag = flag; - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); VERIFY_CHECK(bit < 256); bit += ((uint32_t) vflag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */ - rustsecp256k1_v0_9_2_u128_from_u64(&t, r->d[0]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F)); - r->d[0] = rustsecp256k1_v0_9_2_u128_to_u64(&t); rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, r->d[1]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F)); - r->d[1] = rustsecp256k1_v0_9_2_u128_to_u64(&t); rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, r->d[2]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F)); - r->d[2] = rustsecp256k1_v0_9_2_u128_to_u64(&t); rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, r->d[3]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F)); - r->d[3] = rustsecp256k1_v0_9_2_u128_to_u64(&t); - - rustsecp256k1_v0_9_2_scalar_verify(r); -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_u128_hi_u64(&t) == 0); -#endif -} - -static void rustsecp256k1_v0_9_2_scalar_set_b32(rustsecp256k1_v0_9_2_scalar *r, const unsigned char *b32, int *overflow) { + rustsecp256k1_v0_10_0_u128_from_u64(&t, r->d[0]); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F)); + r->d[0] = rustsecp256k1_v0_10_0_u128_to_u64(&t); rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, r->d[1]); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F)); + r->d[1] = rustsecp256k1_v0_10_0_u128_to_u64(&t); rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, r->d[2]); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F)); + r->d[2] = rustsecp256k1_v0_10_0_u128_to_u64(&t); rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, r->d[3]); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, 
((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F)); + r->d[3] = rustsecp256k1_v0_10_0_u128_to_u64(&t); + + SECP256K1_SCALAR_VERIFY(r); + VERIFY_CHECK(rustsecp256k1_v0_10_0_u128_hi_u64(&t) == 0); +} + +static void rustsecp256k1_v0_10_0_scalar_set_b32(rustsecp256k1_v0_10_0_scalar *r, const unsigned char *b32, int *overflow) { int over; - r->d[0] = rustsecp256k1_v0_9_2_read_be64(&b32[24]); - r->d[1] = rustsecp256k1_v0_9_2_read_be64(&b32[16]); - r->d[2] = rustsecp256k1_v0_9_2_read_be64(&b32[8]); - r->d[3] = rustsecp256k1_v0_9_2_read_be64(&b32[0]); - over = rustsecp256k1_v0_9_2_scalar_reduce(r, rustsecp256k1_v0_9_2_scalar_check_overflow(r)); + r->d[0] = rustsecp256k1_v0_10_0_read_be64(&b32[24]); + r->d[1] = rustsecp256k1_v0_10_0_read_be64(&b32[16]); + r->d[2] = rustsecp256k1_v0_10_0_read_be64(&b32[8]); + r->d[3] = rustsecp256k1_v0_10_0_read_be64(&b32[0]); + over = rustsecp256k1_v0_10_0_scalar_reduce(r, rustsecp256k1_v0_10_0_scalar_check_overflow(r)); if (overflow) { *overflow = over; } - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -static void rustsecp256k1_v0_9_2_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_9_2_scalar* a) { - rustsecp256k1_v0_9_2_scalar_verify(a); +static void rustsecp256k1_v0_10_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_10_0_scalar* a) { + SECP256K1_SCALAR_VERIFY(a); - rustsecp256k1_v0_9_2_write_be64(&bin[0], a->d[3]); - rustsecp256k1_v0_9_2_write_be64(&bin[8], a->d[2]); - rustsecp256k1_v0_9_2_write_be64(&bin[16], a->d[1]); - rustsecp256k1_v0_9_2_write_be64(&bin[24], a->d[0]); + rustsecp256k1_v0_10_0_write_be64(&bin[0], a->d[3]); + rustsecp256k1_v0_10_0_write_be64(&bin[8], a->d[2]); + rustsecp256k1_v0_10_0_write_be64(&bin[16], a->d[1]); + rustsecp256k1_v0_10_0_write_be64(&bin[24], a->d[0]); } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_is_zero(const rustsecp256k1_v0_9_2_scalar *a) { - rustsecp256k1_v0_9_2_scalar_verify(a); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_is_zero(const rustsecp256k1_v0_10_0_scalar *a) { + SECP256K1_SCALAR_VERIFY(a); return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0; } -static void rustsecp256k1_v0_9_2_scalar_negate(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a) { - uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (rustsecp256k1_v0_9_2_scalar_is_zero(a) == 0); - rustsecp256k1_v0_9_2_uint128 t; - rustsecp256k1_v0_9_2_scalar_verify(a); - - rustsecp256k1_v0_9_2_u128_from_u64(&t, ~a->d[0]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, SECP256K1_N_0 + 1); - r->d[0] = rustsecp256k1_v0_9_2_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, ~a->d[1]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, SECP256K1_N_1); - r->d[1] = rustsecp256k1_v0_9_2_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, ~a->d[2]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, SECP256K1_N_2); - r->d[2] = rustsecp256k1_v0_9_2_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, ~a->d[3]); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, SECP256K1_N_3); - r->d[3] = rustsecp256k1_v0_9_2_u128_to_u64(&t) & nonzero; - - rustsecp256k1_v0_9_2_scalar_verify(r); +static void rustsecp256k1_v0_10_0_scalar_negate(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a) { + uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (rustsecp256k1_v0_10_0_scalar_is_zero(a) == 0); + rustsecp256k1_v0_10_0_uint128 t; + SECP256K1_SCALAR_VERIFY(a); + + rustsecp256k1_v0_10_0_u128_from_u64(&t, ~a->d[0]); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, SECP256K1_N_0 + 1); + r->d[0] = rustsecp256k1_v0_10_0_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, ~a->d[1]); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, SECP256K1_N_1); + r->d[1] = rustsecp256k1_v0_10_0_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, ~a->d[2]); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, SECP256K1_N_2); + r->d[2] = rustsecp256k1_v0_10_0_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, ~a->d[3]); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, SECP256K1_N_3); + r->d[3] = rustsecp256k1_v0_10_0_u128_to_u64(&t) & nonzero; + + SECP256K1_SCALAR_VERIFY(r); +}
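The negation just above computes n - a limb by limb as (~a + n + 1) mod 2^256, i.e. two's-complement negation plus the group order n, with the all-ones/all-zeros `nonzero` mask forcing the result to 0 when a = 0 (so that -0 is 0 rather than n). A minimal sketch of the same identity at 8-bit width, with an arbitrary small odd modulus standing in for the group order (nothing below is part of the patch or the library):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        const unsigned n = 201;                          /* toy stand-in for the group order */
        unsigned a;
        for (a = 0; a < n; a++) {
            uint8_t mask = (uint8_t)-(a != 0);           /* all-ones iff a != 0, like `nonzero` */
            uint8_t neg = (uint8_t)(~a + n + 1u) & mask; /* (2^8 + n - a) mod 2^8 = n - a */
            assert(neg < n);
            assert((a + neg) % n == 0);                  /* a + (-a) == 0 (mod n) */
        }
        return 0;
    }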
+static void rustsecp256k1_v0_10_0_scalar_half(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a) { + /* Writing `/` for field division and `//` for integer division, we compute + * + * a/2 = (a - (a&1))/2 + (a&1)/2 + * = (a >> 1) + (a&1 ? 1/2 : 0) + * = (a >> 1) + (a&1 ? n//2+1 : 0), + * + * where n is the group order and in the last equality we have used 1/2 = n//2+1 (mod n). + * For n//2, we have the constants SECP256K1_N_H_0, ... + * + * This sum does not overflow. The most extreme case is a = -2, the largest odd scalar. Here: + * - the left summand is: a >> 1 = (a - a&1)/2 = (n-2-1)//2 = (n-3)//2 + * - the right summand is: a&1 ? n//2+1 : 0 = n//2+1 = (n-1)//2 + 2//2 = (n+1)//2 + * Together they sum to (n-3)//2 + (n+1)//2 = (2n-2)//2 = n - 1, which is less than n. + */ + uint64_t mask = -(uint64_t)(a->d[0] & 1U); + rustsecp256k1_v0_10_0_uint128 t; + SECP256K1_SCALAR_VERIFY(a); + + rustsecp256k1_v0_10_0_u128_from_u64(&t, (a->d[0] >> 1) | (a->d[1] << 63)); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, (SECP256K1_N_H_0 + 1U) & mask); + r->d[0] = rustsecp256k1_v0_10_0_u128_to_u64(&t); rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, (a->d[1] >> 1) | (a->d[2] << 63)); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, SECP256K1_N_H_1 & mask); + r->d[1] = rustsecp256k1_v0_10_0_u128_to_u64(&t); rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, (a->d[2] >> 1) | (a->d[3] << 63)); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, SECP256K1_N_H_2 & mask); + r->d[2] = rustsecp256k1_v0_10_0_u128_to_u64(&t); rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + r->d[3] = rustsecp256k1_v0_10_0_u128_to_u64(&t) + (a->d[3] >> 1) + (SECP256K1_N_H_3 & mask); +#ifdef VERIFY + /* The line above only computed the bottom 64 bits of r->d[3]; redo the computation + * in full 128 bits to make sure the top 64 bits are indeed zero. */ + rustsecp256k1_v0_10_0_u128_accum_u64(&t, a->d[3] >> 1); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, SECP256K1_N_H_3 & mask); + rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + VERIFY_CHECK(rustsecp256k1_v0_10_0_u128_to_u64(&t) == 0); + + SECP256K1_SCALAR_VERIFY(r); +#endif }
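The halving identity derived in the comment above can be sanity-checked exhaustively at machine-word size; here is a self-contained sketch with an arbitrary small odd modulus in place of the group order (again, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        const uint64_t n = 101;                   /* any odd modulus models the group order */
        uint64_t a;
        for (a = 0; a < n; a++) {
            /* a/2 (mod n) == (a >> 1) + (a odd ? n/2 + 1 : 0), as derived above */
            uint64_t half = (a >> 1) + ((a & 1) ? n / 2 + 1 : 0);
            assert(half < n);                     /* the sum stays below n, as argued above */
            assert(2 * half % n == a);            /* doubling recovers a, so half == a/2 */
        }
        return 0;
    }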
-SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_is_one(const rustsecp256k1_v0_9_2_scalar *a) { - rustsecp256k1_v0_9_2_scalar_verify(a); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_is_one(const rustsecp256k1_v0_10_0_scalar *a) { + SECP256K1_SCALAR_VERIFY(a); return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0; } -static int rustsecp256k1_v0_9_2_scalar_is_high(const rustsecp256k1_v0_9_2_scalar *a) { int yes = 0; int no = 0; - rustsecp256k1_v0_9_2_scalar_verify(a); +static int rustsecp256k1_v0_10_0_scalar_is_high(const rustsecp256k1_v0_10_0_scalar *a) { int yes = 0; int no = 0; + SECP256K1_SCALAR_VERIFY(a); no |= (a->d[3] < SECP256K1_N_H_3); yes |= (a->d[3] > SECP256K1_N_H_3) & ~no; @@ -219,29 +258,29 @@ static int rustsecp256k1_v0_9_2_scalar_is_high(const rustsecp256k1_v0_9_2_scalar return yes; } -static int rustsecp256k1_v0_9_2_scalar_cond_negate(rustsecp256k1_v0_9_2_scalar *r, int flag) { +static int rustsecp256k1_v0_10_0_scalar_cond_negate(rustsecp256k1_v0_10_0_scalar *r, int flag) { /* If we are flag = 0, mask = 00...00 and this is a no-op; - * if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_9_2_scalar_negate */ + * if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_10_0_scalar_negate */ volatile int vflag = flag; uint64_t mask = -vflag; - uint64_t nonzero = (rustsecp256k1_v0_9_2_scalar_is_zero(r) != 0) - 1; - rustsecp256k1_v0_9_2_uint128 t; - rustsecp256k1_v0_9_2_scalar_verify(r); - - rustsecp256k1_v0_9_2_u128_from_u64(&t, r->d[0] ^ mask); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask); - r->d[0] = rustsecp256k1_v0_9_2_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, r->d[1] ^ mask); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, SECP256K1_N_1 & mask); - r->d[1] = rustsecp256k1_v0_9_2_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, r->d[2] ^ mask); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, SECP256K1_N_2 & mask); - r->d[2] = rustsecp256k1_v0_9_2_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_9_2_u128_rshift(&t, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, r->d[3] ^ mask); - rustsecp256k1_v0_9_2_u128_accum_u64(&t, SECP256K1_N_3 & mask); - r->d[3] = rustsecp256k1_v0_9_2_u128_to_u64(&t) & nonzero; - - rustsecp256k1_v0_9_2_scalar_verify(r); + uint64_t nonzero = (rustsecp256k1_v0_10_0_scalar_is_zero(r) != 0) - 1; + rustsecp256k1_v0_10_0_uint128 t; + SECP256K1_SCALAR_VERIFY(r); + + rustsecp256k1_v0_10_0_u128_from_u64(&t, r->d[0] ^ mask); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask); + r->d[0] = rustsecp256k1_v0_10_0_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, r->d[1] ^ mask); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, SECP256K1_N_1 & mask); + r->d[1] = rustsecp256k1_v0_10_0_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, r->d[2] ^ mask); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, SECP256K1_N_2 & mask); + r->d[2] = rustsecp256k1_v0_10_0_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_10_0_u128_rshift(&t, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&t, r->d[3] ^ mask); + 
rustsecp256k1_v0_10_0_u128_accum_u64(&t, SECP256K1_N_3 & mask); + r->d[3] = rustsecp256k1_v0_10_0_u128_to_u64(&t) & nonzero; + + SECP256K1_SCALAR_VERIFY(r); return 2 * (mask == 0) - 1; } @@ -251,10 +290,10 @@ static int rustsecp256k1_v0_9_2_scalar_cond_negate(rustsecp256k1_v0_9_2_scalar * #define muladd(a,b) { \ uint64_t tl, th; \ { \ - rustsecp256k1_v0_9_2_uint128 t; \ - rustsecp256k1_v0_9_2_u128_mul(&t, a, b); \ - th = rustsecp256k1_v0_9_2_u128_hi_u64(&t); /* at most 0xFFFFFFFFFFFFFFFE */ \ - tl = rustsecp256k1_v0_9_2_u128_to_u64(&t); \ + rustsecp256k1_v0_10_0_uint128 t; \ + rustsecp256k1_v0_10_0_u128_mul(&t, a, b); \ + th = rustsecp256k1_v0_10_0_u128_hi_u64(&t); /* at most 0xFFFFFFFFFFFFFFFE */ \ + tl = rustsecp256k1_v0_10_0_u128_to_u64(&t); \ } \ c0 += tl; /* overflow is handled on the next line */ \ th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \ @@ -267,10 +306,10 @@ static int rustsecp256k1_v0_9_2_scalar_cond_negate(rustsecp256k1_v0_9_2_scalar * #define muladd_fast(a,b) { \ uint64_t tl, th; \ { \ - rustsecp256k1_v0_9_2_uint128 t; \ - rustsecp256k1_v0_9_2_u128_mul(&t, a, b); \ - th = rustsecp256k1_v0_9_2_u128_hi_u64(&t); /* at most 0xFFFFFFFFFFFFFFFE */ \ - tl = rustsecp256k1_v0_9_2_u128_to_u64(&t); \ + rustsecp256k1_v0_10_0_uint128 t; \ + rustsecp256k1_v0_10_0_u128_mul(&t, a, b); \ + th = rustsecp256k1_v0_10_0_u128_hi_u64(&t); /* at most 0xFFFFFFFFFFFFFFFE */ \ + tl = rustsecp256k1_v0_10_0_u128_to_u64(&t); \ } \ c0 += tl; /* overflow is handled on the next line */ \ th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \ @@ -311,7 +350,7 @@ static int rustsecp256k1_v0_9_2_scalar_cond_negate(rustsecp256k1_v0_9_2_scalar * VERIFY_CHECK(c2 == 0); \ } -static void rustsecp256k1_v0_9_2_scalar_reduce_512(rustsecp256k1_v0_9_2_scalar *r, const uint64_t *l) { +static void rustsecp256k1_v0_10_0_scalar_reduce_512(rustsecp256k1_v0_10_0_scalar *r, const uint64_t *l) { #ifdef USE_ASM_X86_64 /* Reduce 512 bits into 385. */ uint64_t m0, m1, m2, m3, m4, m5, m6; @@ -548,7 +587,7 @@ static void rustsecp256k1_v0_9_2_scalar_reduce_512(rustsecp256k1_v0_9_2_scalar * : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1) : "rax", "rdx", "r8", "r9", "r10", "cc", "memory"); #else - rustsecp256k1_v0_9_2_uint128 c128; + rustsecp256k1_v0_10_0_uint128 c128; uint64_t c, c0, c1, c2; uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7]; uint64_t m0, m1, m2, m3, m4, m5; @@ -606,25 +645,25 @@ static void rustsecp256k1_v0_9_2_scalar_reduce_512(rustsecp256k1_v0_9_2_scalar * /* Reduce 258 bits into 256. */ /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. 
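/* Sketch of the fold-back reduction idea used here, on an 8-bit analogue:
 * with toy modulus N = 251 and N_C = 2^8 - N = 5, we have 2^8 == N_C
 * (mod N), so the high byte folds into the low byte via a multiply by N_C.
 * reduce16 is an illustrative helper, not part of the vendored code. */
#include <stdint.h>
#include <assert.h>

static uint32_t reduce16(uint32_t x) {
    x = (x & 0xFF) + (x >> 8) * 5u; /* at most 255 + 255*5 = 1530 */
    x = (x & 0xFF) + (x >> 8) * 5u; /* at most 255 + 5*5 = 280 */
    if (x >= 251u) x -= 251u;       /* final conditional subtraction */
    return x;
}

int main(void) {
    uint32_t x;
    for (x = 0; x < (1u << 16); x++) {
        assert(reduce16(x) == x % 251u);
    }
    return 0;
}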
*/ - rustsecp256k1_v0_9_2_u128_from_u64(&c128, p0); - rustsecp256k1_v0_9_2_u128_accum_mul(&c128, SECP256K1_N_C_0, p4); - r->d[0] = rustsecp256k1_v0_9_2_u128_to_u64(&c128); rustsecp256k1_v0_9_2_u128_rshift(&c128, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&c128, p1); - rustsecp256k1_v0_9_2_u128_accum_mul(&c128, SECP256K1_N_C_1, p4); - r->d[1] = rustsecp256k1_v0_9_2_u128_to_u64(&c128); rustsecp256k1_v0_9_2_u128_rshift(&c128, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&c128, p2); - rustsecp256k1_v0_9_2_u128_accum_u64(&c128, p4); - r->d[2] = rustsecp256k1_v0_9_2_u128_to_u64(&c128); rustsecp256k1_v0_9_2_u128_rshift(&c128, 64); - rustsecp256k1_v0_9_2_u128_accum_u64(&c128, p3); - r->d[3] = rustsecp256k1_v0_9_2_u128_to_u64(&c128); - c = rustsecp256k1_v0_9_2_u128_hi_u64(&c128); + rustsecp256k1_v0_10_0_u128_from_u64(&c128, p0); + rustsecp256k1_v0_10_0_u128_accum_mul(&c128, SECP256K1_N_C_0, p4); + r->d[0] = rustsecp256k1_v0_10_0_u128_to_u64(&c128); rustsecp256k1_v0_10_0_u128_rshift(&c128, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&c128, p1); + rustsecp256k1_v0_10_0_u128_accum_mul(&c128, SECP256K1_N_C_1, p4); + r->d[1] = rustsecp256k1_v0_10_0_u128_to_u64(&c128); rustsecp256k1_v0_10_0_u128_rshift(&c128, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&c128, p2); + rustsecp256k1_v0_10_0_u128_accum_u64(&c128, p4); + r->d[2] = rustsecp256k1_v0_10_0_u128_to_u64(&c128); rustsecp256k1_v0_10_0_u128_rshift(&c128, 64); + rustsecp256k1_v0_10_0_u128_accum_u64(&c128, p3); + r->d[3] = rustsecp256k1_v0_10_0_u128_to_u64(&c128); + c = rustsecp256k1_v0_10_0_u128_hi_u64(&c128); #endif /* Final reduction of r. */ - rustsecp256k1_v0_9_2_scalar_reduce(r, c + rustsecp256k1_v0_9_2_scalar_check_overflow(r)); + rustsecp256k1_v0_10_0_scalar_reduce(r, c + rustsecp256k1_v0_10_0_scalar_check_overflow(r)); } -static void rustsecp256k1_v0_9_2_scalar_mul_512(uint64_t l[8], const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b) { +static void rustsecp256k1_v0_10_0_scalar_mul_512(uint64_t l[8], const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b) { #ifdef USE_ASM_X86_64 const uint64_t *pb = b->d; __asm__ __volatile__( @@ -798,35 +837,19 @@ static void rustsecp256k1_v0_9_2_scalar_mul_512(uint64_t l[8], const rustsecp256 #undef extract #undef extract_fast -static void rustsecp256k1_v0_9_2_scalar_mul(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b) { +static void rustsecp256k1_v0_10_0_scalar_mul(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b) { uint64_t l[8]; - rustsecp256k1_v0_9_2_scalar_verify(a); - rustsecp256k1_v0_9_2_scalar_verify(b); + SECP256K1_SCALAR_VERIFY(a); + SECP256K1_SCALAR_VERIFY(b); - rustsecp256k1_v0_9_2_scalar_mul_512(l, a, b); - rustsecp256k1_v0_9_2_scalar_reduce_512(r, l); + rustsecp256k1_v0_10_0_scalar_mul_512(l, a, b); + rustsecp256k1_v0_10_0_scalar_reduce_512(r, l); - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -static int rustsecp256k1_v0_9_2_scalar_shr_int(rustsecp256k1_v0_9_2_scalar *r, int n) { - int ret; - rustsecp256k1_v0_9_2_scalar_verify(r); - VERIFY_CHECK(n > 0); - VERIFY_CHECK(n < 16); - - ret = r->d[0] & ((1 << n) - 1); - r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n)); - r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n)); - r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n)); - r->d[3] = (r->d[3] >> n); - - rustsecp256k1_v0_9_2_scalar_verify(r); - return ret; -} - -static void 
rustsecp256k1_v0_9_2_scalar_split_128(rustsecp256k1_v0_9_2_scalar *r1, rustsecp256k1_v0_9_2_scalar *r2, const rustsecp256k1_v0_9_2_scalar *k) { - rustsecp256k1_v0_9_2_scalar_verify(k); +static void rustsecp256k1_v0_10_0_scalar_split_128(rustsecp256k1_v0_10_0_scalar *r1, rustsecp256k1_v0_10_0_scalar *r2, const rustsecp256k1_v0_10_0_scalar *k) { + SECP256K1_SCALAR_VERIFY(k); r1->d[0] = k->d[0]; r1->d[1] = k->d[1]; @@ -837,27 +860,27 @@ static void rustsecp256k1_v0_9_2_scalar_split_128(rustsecp256k1_v0_9_2_scalar *r r2->d[2] = 0; r2->d[3] = 0; - rustsecp256k1_v0_9_2_scalar_verify(r1); - rustsecp256k1_v0_9_2_scalar_verify(r2); + SECP256K1_SCALAR_VERIFY(r1); + SECP256K1_SCALAR_VERIFY(r2); } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_eq(const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b) { - rustsecp256k1_v0_9_2_scalar_verify(a); - rustsecp256k1_v0_9_2_scalar_verify(b); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_eq(const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b) { + SECP256K1_SCALAR_VERIFY(a); + SECP256K1_SCALAR_VERIFY(b); return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_scalar_mul_shift_var(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b, unsigned int shift) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_scalar_mul_shift_var(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b, unsigned int shift) { uint64_t l[8]; unsigned int shiftlimbs; unsigned int shiftlow; unsigned int shifthigh; - rustsecp256k1_v0_9_2_scalar_verify(a); - rustsecp256k1_v0_9_2_scalar_verify(b); + SECP256K1_SCALAR_VERIFY(a); + SECP256K1_SCALAR_VERIFY(b); VERIFY_CHECK(shift >= 256); - rustsecp256k1_v0_9_2_scalar_mul_512(l, a, b); + rustsecp256k1_v0_10_0_scalar_mul_512(l, a, b); shiftlimbs = shift >> 6; shiftlow = shift & 0x3F; shifthigh = 64 - shiftlow; @@ -865,15 +888,15 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_scalar_mul_shift_var(rustsecp2 r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[3] = shift < 320 ? 
(l[3 + shiftlimbs] >> shiftlow) : 0; - rustsecp256k1_v0_9_2_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1); + rustsecp256k1_v0_10_0_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1); - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_scalar_cmov(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_scalar_cmov(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, int flag) { uint64_t mask0, mask1; volatile int vflag = flag; - rustsecp256k1_v0_9_2_scalar_verify(a); + SECP256K1_SCALAR_VERIFY(a); SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d)); mask0 = vflag + ~((uint64_t)0); @@ -883,13 +906,13 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_scalar_cmov(rustsecp256k1_v0_9 r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1); r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1); - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -static void rustsecp256k1_v0_9_2_scalar_from_signed62(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_modinv64_signed62 *a) { +static void rustsecp256k1_v0_10_0_scalar_from_signed62(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_modinv64_signed62 *a) { const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4]; - /* The output from rustsecp256k1_v0_9_2_modinv64{_var} should be normalized to range [0,modulus), and + /* The output from rustsecp256k1_v0_10_0_modinv64{_var} should be normalized to range [0,modulus), and * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4). */ VERIFY_CHECK(a0 >> 62 == 0); @@ -903,13 +926,13 @@ static void rustsecp256k1_v0_9_2_scalar_from_signed62(rustsecp256k1_v0_9_2_scala r->d[2] = a2 >> 4 | a3 << 58; r->d[3] = a3 >> 6 | a4 << 56; - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -static void rustsecp256k1_v0_9_2_scalar_to_signed62(rustsecp256k1_v0_9_2_modinv64_signed62 *r, const rustsecp256k1_v0_9_2_scalar *a) { +static void rustsecp256k1_v0_10_0_scalar_to_signed62(rustsecp256k1_v0_10_0_modinv64_signed62 *r, const rustsecp256k1_v0_10_0_scalar *a) { const uint64_t M62 = UINT64_MAX >> 2; const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3]; - rustsecp256k1_v0_9_2_scalar_verify(a); + SECP256K1_SCALAR_VERIFY(a); r->v[0] = a0 & M62; r->v[1] = (a0 >> 62 | a1 << 2) & M62; @@ -918,47 +941,43 @@ static void rustsecp256k1_v0_9_2_scalar_to_signed62(rustsecp256k1_v0_9_2_modinv6 r->v[4] = a3 >> 56; } -static const rustsecp256k1_v0_9_2_modinv64_modinfo rustsecp256k1_v0_9_2_const_modinfo_scalar = { +static const rustsecp256k1_v0_10_0_modinv64_modinfo rustsecp256k1_v0_10_0_const_modinfo_scalar = { {{0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256}}, 0x34F20099AA774EC1LL }; -static void rustsecp256k1_v0_9_2_scalar_inverse(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *x) { - rustsecp256k1_v0_9_2_modinv64_signed62 s; +static void rustsecp256k1_v0_10_0_scalar_inverse(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *x) { + rustsecp256k1_v0_10_0_modinv64_signed62 s; #ifdef VERIFY - int zero_in = rustsecp256k1_v0_9_2_scalar_is_zero(x); + int zero_in = rustsecp256k1_v0_10_0_scalar_is_zero(x); #endif - rustsecp256k1_v0_9_2_scalar_verify(x); + SECP256K1_SCALAR_VERIFY(x); - rustsecp256k1_v0_9_2_scalar_to_signed62(&s, x); - 
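/* Round-trip sketch of the 62-bit/64-bit limb repacking performed by the
 * signed62 conversions, for non-negative values; to62/from62 are
 * illustrative helpers, not the library functions. */
#include <stdint.h>
#include <assert.h>

static void to62(uint64_t v[5], const uint64_t d[4]) {
    const uint64_t M62 = UINT64_MAX >> 2;
    v[0] = d[0] & M62;
    v[1] = (d[0] >> 62 | d[1] << 2) & M62;
    v[2] = (d[1] >> 60 | d[2] << 4) & M62;
    v[3] = (d[2] >> 58 | d[3] << 6) & M62;
    v[4] = d[3] >> 56;
}

static void from62(uint64_t d[4], const uint64_t v[5]) {
    d[0] = v[0] | v[1] << 62;
    d[1] = v[1] >> 2 | v[2] << 60;
    d[2] = v[2] >> 4 | v[3] << 58;
    d[3] = v[3] >> 6 | v[4] << 56;
}

int main(void) {
    uint64_t d[4] = {0x0123456789ABCDEFull, 0xFEDCBA9876543210ull,
                     0x0F1E2D3C4B5A6978ull, 0x1122334455667788ull};
    uint64_t v[5], e[4];
    to62(v, d);
    from62(e, v);
    assert(e[0] == d[0] && e[1] == d[1] && e[2] == d[2] && e[3] == d[3]);
    return 0;
}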
rustsecp256k1_v0_9_2_modinv64(&s, &rustsecp256k1_v0_9_2_const_modinfo_scalar); - rustsecp256k1_v0_9_2_scalar_from_signed62(r, &s); + rustsecp256k1_v0_10_0_scalar_to_signed62(&s, x); + rustsecp256k1_v0_10_0_modinv64(&s, &rustsecp256k1_v0_10_0_const_modinfo_scalar); + rustsecp256k1_v0_10_0_scalar_from_signed62(r, &s); - rustsecp256k1_v0_9_2_scalar_verify(r); -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_scalar_is_zero(r) == zero_in); -#endif + SECP256K1_SCALAR_VERIFY(r); + VERIFY_CHECK(rustsecp256k1_v0_10_0_scalar_is_zero(r) == zero_in); } -static void rustsecp256k1_v0_9_2_scalar_inverse_var(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *x) { - rustsecp256k1_v0_9_2_modinv64_signed62 s; +static void rustsecp256k1_v0_10_0_scalar_inverse_var(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *x) { + rustsecp256k1_v0_10_0_modinv64_signed62 s; #ifdef VERIFY - int zero_in = rustsecp256k1_v0_9_2_scalar_is_zero(x); + int zero_in = rustsecp256k1_v0_10_0_scalar_is_zero(x); #endif - rustsecp256k1_v0_9_2_scalar_verify(x); + SECP256K1_SCALAR_VERIFY(x); - rustsecp256k1_v0_9_2_scalar_to_signed62(&s, x); - rustsecp256k1_v0_9_2_modinv64_var(&s, &rustsecp256k1_v0_9_2_const_modinfo_scalar); - rustsecp256k1_v0_9_2_scalar_from_signed62(r, &s); + rustsecp256k1_v0_10_0_scalar_to_signed62(&s, x); + rustsecp256k1_v0_10_0_modinv64_var(&s, &rustsecp256k1_v0_10_0_const_modinfo_scalar); + rustsecp256k1_v0_10_0_scalar_from_signed62(r, &s); - rustsecp256k1_v0_9_2_scalar_verify(r); -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_scalar_is_zero(r) == zero_in); -#endif + SECP256K1_SCALAR_VERIFY(r); + VERIFY_CHECK(rustsecp256k1_v0_10_0_scalar_is_zero(r) == zero_in); } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_is_even(const rustsecp256k1_v0_9_2_scalar *a) { - rustsecp256k1_v0_9_2_scalar_verify(a); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_is_even(const rustsecp256k1_v0_10_0_scalar *a) { + SECP256K1_SCALAR_VERIFY(a); return !(a->d[0] & 1); } diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h b/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h index 1d385a20c..bcba214ce 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h @@ -12,7 +12,7 @@ /** A scalar modulo the group order of the secp256k1 curve. 
*/ typedef struct { uint32_t d[8]; -} rustsecp256k1_v0_9_2_scalar; +} rustsecp256k1_v0_10_0_scalar; #define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)}} diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h index 630702c74..4aba971e2 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h @@ -38,7 +38,7 @@ #define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL) #define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL) -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_scalar_clear(rustsecp256k1_v0_9_2_scalar *r) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_scalar_clear(rustsecp256k1_v0_10_0_scalar *r) { r->d[0] = 0; r->d[1] = 0; r->d[2] = 0; @@ -49,7 +49,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_scalar_clear(rustsecp256k1_v0_ r->d[7] = 0; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_scalar_set_int(rustsecp256k1_v0_9_2_scalar *r, unsigned int v) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_scalar_set_int(rustsecp256k1_v0_10_0_scalar *r, unsigned int v) { r->d[0] = v; r->d[1] = 0; r->d[2] = 0; @@ -59,30 +59,30 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_scalar_set_int(rustsecp256k1_v r->d[6] = 0; r->d[7] = 0; - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_9_2_scalar_get_bits(const rustsecp256k1_v0_9_2_scalar *a, unsigned int offset, unsigned int count) { - rustsecp256k1_v0_9_2_scalar_verify(a); +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_10_0_scalar_get_bits(const rustsecp256k1_v0_10_0_scalar *a, unsigned int offset, unsigned int count) { + SECP256K1_SCALAR_VERIFY(a); VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5); return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1); } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_9_2_scalar_get_bits_var(const rustsecp256k1_v0_9_2_scalar *a, unsigned int offset, unsigned int count) { - rustsecp256k1_v0_9_2_scalar_verify(a); +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_10_0_scalar_get_bits_var(const rustsecp256k1_v0_10_0_scalar *a, unsigned int offset, unsigned int count) { + SECP256K1_SCALAR_VERIFY(a); VERIFY_CHECK(count < 32); VERIFY_CHECK(offset + count <= 256); if ((offset + count - 1) >> 5 == offset >> 5) { - return rustsecp256k1_v0_9_2_scalar_get_bits(a, offset, count); + return rustsecp256k1_v0_10_0_scalar_get_bits(a, offset, count); } else { VERIFY_CHECK((offset >> 5) + 1 < 8); return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1); } } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_check_overflow(const rustsecp256k1_v0_9_2_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_check_overflow(const rustsecp256k1_v0_10_0_scalar *a) { int yes = 0; int no = 0; no |= (a->d[7] < SECP256K1_N_7); /* No need for a > check. 
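/* The branchless limb-wise ">= N" comparison pattern used by
 * check_overflow, sketched on a toy two-limb value; ge_2limb is an
 * illustrative helper. The high limb decides first, and lower limbs only
 * matter when all higher limbs are equal. */
#include <stdint.h>
#include <assert.h>

static int ge_2limb(const uint32_t a[2], const uint32_t n[2]) {
    int yes = 0;
    int no = 0;
    no  |= (a[1] < n[1]);          /* high limb decides first */
    yes |= (a[1] > n[1]) & ~no;
    /* If the high limbs are equal, the low limb decides. */
    yes |= (a[0] >= n[0]) & ~no;
    return yes;
}

int main(void) {
    const uint32_t n[2] = {5, 7}; /* represents 7*2^32 + 5 */
    uint32_t a[2];
    a[0] = 4; a[1] = 7; assert(ge_2limb(a, n) == 0);
    a[0] = 5; a[1] = 7; assert(ge_2limb(a, n) == 1);
    a[0] = 0; a[1] = 8; assert(ge_2limb(a, n) == 1);
    a[0] = 9; a[1] = 6; assert(ge_2limb(a, n) == 0);
    return 0;
}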
*/ @@ -100,7 +100,7 @@ SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_check_overflow(const rus return yes; } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_reduce(rustsecp256k1_v0_9_2_scalar *r, uint32_t overflow) { +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_reduce(rustsecp256k1_v0_10_0_scalar *r, uint32_t overflow) { uint64_t t; VERIFY_CHECK(overflow <= 1); @@ -121,15 +121,15 @@ SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_reduce(rustsecp256k1_v0_ t += (uint64_t)r->d[7]; r->d[7] = t & 0xFFFFFFFFUL; - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); return overflow; } -static int rustsecp256k1_v0_9_2_scalar_add(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b) { +static int rustsecp256k1_v0_10_0_scalar_add(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b) { int overflow; uint64_t t = (uint64_t)a->d[0] + b->d[0]; - rustsecp256k1_v0_9_2_scalar_verify(a); - rustsecp256k1_v0_9_2_scalar_verify(b); + SECP256K1_SCALAR_VERIFY(a); + SECP256K1_SCALAR_VERIFY(b); r->d[0] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)a->d[1] + b->d[1]; @@ -146,18 +146,18 @@ static int rustsecp256k1_v0_9_2_scalar_add(rustsecp256k1_v0_9_2_scalar *r, const r->d[6] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)a->d[7] + b->d[7]; r->d[7] = t & 0xFFFFFFFFULL; t >>= 32; - overflow = t + rustsecp256k1_v0_9_2_scalar_check_overflow(r); + overflow = t + rustsecp256k1_v0_10_0_scalar_check_overflow(r); VERIFY_CHECK(overflow == 0 || overflow == 1); - rustsecp256k1_v0_9_2_scalar_reduce(r, overflow); + rustsecp256k1_v0_10_0_scalar_reduce(r, overflow); - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); return overflow; } -static void rustsecp256k1_v0_9_2_scalar_cadd_bit(rustsecp256k1_v0_9_2_scalar *r, unsigned int bit, int flag) { +static void rustsecp256k1_v0_10_0_scalar_cadd_bit(rustsecp256k1_v0_10_0_scalar *r, unsigned int bit, int flag) { uint64_t t; volatile int vflag = flag; - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); VERIFY_CHECK(bit < 256); bit += ((uint32_t) vflag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */ @@ -178,53 +178,51 @@ static void rustsecp256k1_v0_9_2_scalar_cadd_bit(rustsecp256k1_v0_9_2_scalar *r, t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F)); r->d[7] = t & 0xFFFFFFFFULL; - rustsecp256k1_v0_9_2_scalar_verify(r); -#ifdef VERIFY + SECP256K1_SCALAR_VERIFY(r); VERIFY_CHECK((t >> 32) == 0); -#endif } -static void rustsecp256k1_v0_9_2_scalar_set_b32(rustsecp256k1_v0_9_2_scalar *r, const unsigned char *b32, int *overflow) { +static void rustsecp256k1_v0_10_0_scalar_set_b32(rustsecp256k1_v0_10_0_scalar *r, const unsigned char *b32, int *overflow) { int over; - r->d[0] = rustsecp256k1_v0_9_2_read_be32(&b32[28]); - r->d[1] = rustsecp256k1_v0_9_2_read_be32(&b32[24]); - r->d[2] = rustsecp256k1_v0_9_2_read_be32(&b32[20]); - r->d[3] = rustsecp256k1_v0_9_2_read_be32(&b32[16]); - r->d[4] = rustsecp256k1_v0_9_2_read_be32(&b32[12]); - r->d[5] = rustsecp256k1_v0_9_2_read_be32(&b32[8]); - r->d[6] = rustsecp256k1_v0_9_2_read_be32(&b32[4]); - r->d[7] = rustsecp256k1_v0_9_2_read_be32(&b32[0]); - over = rustsecp256k1_v0_9_2_scalar_reduce(r, rustsecp256k1_v0_9_2_scalar_check_overflow(r)); + r->d[0] = rustsecp256k1_v0_10_0_read_be32(&b32[28]); + r->d[1] = rustsecp256k1_v0_10_0_read_be32(&b32[24]); + r->d[2] = rustsecp256k1_v0_10_0_read_be32(&b32[20]); + r->d[3] = 
rustsecp256k1_v0_10_0_read_be32(&b32[16]); + r->d[4] = rustsecp256k1_v0_10_0_read_be32(&b32[12]); + r->d[5] = rustsecp256k1_v0_10_0_read_be32(&b32[8]); + r->d[6] = rustsecp256k1_v0_10_0_read_be32(&b32[4]); + r->d[7] = rustsecp256k1_v0_10_0_read_be32(&b32[0]); + over = rustsecp256k1_v0_10_0_scalar_reduce(r, rustsecp256k1_v0_10_0_scalar_check_overflow(r)); if (overflow) { *overflow = over; } - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -static void rustsecp256k1_v0_9_2_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_9_2_scalar* a) { - rustsecp256k1_v0_9_2_scalar_verify(a); +static void rustsecp256k1_v0_10_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_10_0_scalar* a) { + SECP256K1_SCALAR_VERIFY(a); - rustsecp256k1_v0_9_2_write_be32(&bin[0], a->d[7]); - rustsecp256k1_v0_9_2_write_be32(&bin[4], a->d[6]); - rustsecp256k1_v0_9_2_write_be32(&bin[8], a->d[5]); - rustsecp256k1_v0_9_2_write_be32(&bin[12], a->d[4]); - rustsecp256k1_v0_9_2_write_be32(&bin[16], a->d[3]); - rustsecp256k1_v0_9_2_write_be32(&bin[20], a->d[2]); - rustsecp256k1_v0_9_2_write_be32(&bin[24], a->d[1]); - rustsecp256k1_v0_9_2_write_be32(&bin[28], a->d[0]); + rustsecp256k1_v0_10_0_write_be32(&bin[0], a->d[7]); + rustsecp256k1_v0_10_0_write_be32(&bin[4], a->d[6]); + rustsecp256k1_v0_10_0_write_be32(&bin[8], a->d[5]); + rustsecp256k1_v0_10_0_write_be32(&bin[12], a->d[4]); + rustsecp256k1_v0_10_0_write_be32(&bin[16], a->d[3]); + rustsecp256k1_v0_10_0_write_be32(&bin[20], a->d[2]); + rustsecp256k1_v0_10_0_write_be32(&bin[24], a->d[1]); + rustsecp256k1_v0_10_0_write_be32(&bin[28], a->d[0]); } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_is_zero(const rustsecp256k1_v0_9_2_scalar *a) { - rustsecp256k1_v0_9_2_scalar_verify(a); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_is_zero(const rustsecp256k1_v0_10_0_scalar *a) { + SECP256K1_SCALAR_VERIFY(a); return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; } -static void rustsecp256k1_v0_9_2_scalar_negate(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a) { - uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_9_2_scalar_is_zero(a) == 0); +static void rustsecp256k1_v0_10_0_scalar_negate(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a) { + uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_10_0_scalar_is_zero(a) == 0); uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1; - rustsecp256k1_v0_9_2_scalar_verify(a); + SECP256K1_SCALAR_VERIFY(a); r->d[0] = t & nonzero; t >>= 32; t += (uint64_t)(~a->d[1]) + SECP256K1_N_1; @@ -242,19 +240,67 @@ static void rustsecp256k1_v0_9_2_scalar_negate(rustsecp256k1_v0_9_2_scalar *r, c t += (uint64_t)(~a->d[7]) + SECP256K1_N_7; r->d[7] = t & nonzero; - rustsecp256k1_v0_9_2_scalar_verify(r); -} - -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_is_one(const rustsecp256k1_v0_9_2_scalar *a) { - rustsecp256k1_v0_9_2_scalar_verify(a); + SECP256K1_SCALAR_VERIFY(r); +} + +static void rustsecp256k1_v0_10_0_scalar_half(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a) { + /* Writing `/` for field division and `//` for integer division, we compute + * + * a/2 = (a - (a&1))/2 + (a&1)/2 + * = (a >> 1) + (a&1 ? 1/2 : 0) + * = (a >> 1) + (a&1 ? n//2+1 : 0), + * + * where n is the group order and in the last equality we have used 1/2 = n//2+1 (mod n). + * For n//2, we have the constants SECP256K1_N_H_0, ... + * + * This sum does not overflow. 
The most extreme case is a = -2, the largest odd scalar. Here: + * - the left summand is: a >> 1 = (a - a&1)/2 = (n-2-1)//2 = (n-3)//2 + * - the right summand is: a&1 ? n//2+1 : 0 = n//2+1 = (n-1)//2 + 2//2 = (n+1)//2 + * Together they sum to (n-3)//2 + (n+1)//2 = (2n-2)//2 = n - 1, which is less than n. + */ + uint32_t mask = -(uint32_t)(a->d[0] & 1U); + uint64_t t = (uint32_t)((a->d[0] >> 1) | (a->d[1] << 31)); + SECP256K1_SCALAR_VERIFY(a); + + t += (SECP256K1_N_H_0 + 1U) & mask; + r->d[0] = t; t >>= 32; + t += (uint32_t)((a->d[1] >> 1) | (a->d[2] << 31)); + t += SECP256K1_N_H_1 & mask; + r->d[1] = t; t >>= 32; + t += (uint32_t)((a->d[2] >> 1) | (a->d[3] << 31)); + t += SECP256K1_N_H_2 & mask; + r->d[2] = t; t >>= 32; + t += (uint32_t)((a->d[3] >> 1) | (a->d[4] << 31)); + t += SECP256K1_N_H_3 & mask; + r->d[3] = t; t >>= 32; + t += (uint32_t)((a->d[4] >> 1) | (a->d[5] << 31)); + t += SECP256K1_N_H_4 & mask; + r->d[4] = t; t >>= 32; + t += (uint32_t)((a->d[5] >> 1) | (a->d[6] << 31)); + t += SECP256K1_N_H_5 & mask; + r->d[5] = t; t >>= 32; + t += (uint32_t)((a->d[6] >> 1) | (a->d[7] << 31)); + t += SECP256K1_N_H_6 & mask; + r->d[6] = t; t >>= 32; + r->d[7] = (uint32_t)t + (uint32_t)(a->d[7] >> 1) + (SECP256K1_N_H_7 & mask); + + /* The line above only computed the bottom 32 bits of r->d[7]. Redo the computation + * in full 64 bits to make sure the top 32 bits are indeed zero. */ + VERIFY_CHECK((t + (a->d[7] >> 1) + (SECP256K1_N_H_7 & mask)) >> 32 == 0); + + SECP256K1_SCALAR_VERIFY(r); +} + +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_is_one(const rustsecp256k1_v0_10_0_scalar *a) { + SECP256K1_SCALAR_VERIFY(a); return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; } -static int rustsecp256k1_v0_9_2_scalar_is_high(const rustsecp256k1_v0_9_2_scalar *a) { +static int rustsecp256k1_v0_10_0_scalar_is_high(const rustsecp256k1_v0_10_0_scalar *a) { int yes = 0; int no = 0; - rustsecp256k1_v0_9_2_scalar_verify(a); + SECP256K1_SCALAR_VERIFY(a); no |= (a->d[7] < SECP256K1_N_H_7); yes |= (a->d[7] > SECP256K1_N_H_7) & ~no; @@ -271,14 +317,14 @@ static int rustsecp256k1_v0_9_2_scalar_is_high(const rustsecp256k1_v0_9_2_scalar return yes; } -static int rustsecp256k1_v0_9_2_scalar_cond_negate(rustsecp256k1_v0_9_2_scalar *r, int flag) { +static int rustsecp256k1_v0_10_0_scalar_cond_negate(rustsecp256k1_v0_10_0_scalar *r, int flag) { /* If we are flag = 0, mask = 00...00 and this is a no-op; - * if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_9_2_scalar_negate */ + * if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_10_0_scalar_negate */ volatile int vflag = flag; uint32_t mask = -vflag; - uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_9_2_scalar_is_zero(r) == 0); + uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_10_0_scalar_is_zero(r) == 0); uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); r->d[0] = t & nonzero; t >>= 32; t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); @@ -296,7 +342,7 @@ static int rustsecp256k1_v0_9_2_scalar_cond_negate(rustsecp256k1_v0_9_2_scalar * t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask); r->d[7] = t & nonzero; - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); return 2 * (mask == 0) - 1; } @@ -365,7 +411,7 @@ static int rustsecp256k1_v0_9_2_scalar_cond_negate(rustsecp256k1_v0_9_2_scalar * VERIFY_CHECK(c2 == 0); \ } -static void 
rustsecp256k1_v0_9_2_scalar_reduce_512(rustsecp256k1_v0_9_2_scalar *r, const uint32_t *l) { +static void rustsecp256k1_v0_10_0_scalar_reduce_512(rustsecp256k1_v0_10_0_scalar *r, const uint32_t *l) { uint64_t c; uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15]; uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12; @@ -504,10 +550,10 @@ static void rustsecp256k1_v0_9_2_scalar_reduce_512(rustsecp256k1_v0_9_2_scalar * r->d[7] = c & 0xFFFFFFFFUL; c >>= 32; /* Final reduction of r. */ - rustsecp256k1_v0_9_2_scalar_reduce(r, c + rustsecp256k1_v0_9_2_scalar_check_overflow(r)); + rustsecp256k1_v0_10_0_scalar_reduce(r, c + rustsecp256k1_v0_10_0_scalar_check_overflow(r)); } -static void rustsecp256k1_v0_9_2_scalar_mul_512(uint32_t *l, const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b) { +static void rustsecp256k1_v0_10_0_scalar_mul_512(uint32_t *l, const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b) { /* 96 bit accumulator. */ uint32_t c0 = 0, c1 = 0, c2 = 0; @@ -602,39 +648,19 @@ static void rustsecp256k1_v0_9_2_scalar_mul_512(uint32_t *l, const rustsecp256k1 #undef extract #undef extract_fast -static void rustsecp256k1_v0_9_2_scalar_mul(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b) { +static void rustsecp256k1_v0_10_0_scalar_mul(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b) { uint32_t l[16]; - rustsecp256k1_v0_9_2_scalar_verify(a); - rustsecp256k1_v0_9_2_scalar_verify(b); + SECP256K1_SCALAR_VERIFY(a); + SECP256K1_SCALAR_VERIFY(b); - rustsecp256k1_v0_9_2_scalar_mul_512(l, a, b); - rustsecp256k1_v0_9_2_scalar_reduce_512(r, l); + rustsecp256k1_v0_10_0_scalar_mul_512(l, a, b); + rustsecp256k1_v0_10_0_scalar_reduce_512(r, l); - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -static int rustsecp256k1_v0_9_2_scalar_shr_int(rustsecp256k1_v0_9_2_scalar *r, int n) { - int ret; - rustsecp256k1_v0_9_2_scalar_verify(r); - VERIFY_CHECK(n > 0); - VERIFY_CHECK(n < 16); - - ret = r->d[0] & ((1 << n) - 1); - r->d[0] = (r->d[0] >> n) + (r->d[1] << (32 - n)); - r->d[1] = (r->d[1] >> n) + (r->d[2] << (32 - n)); - r->d[2] = (r->d[2] >> n) + (r->d[3] << (32 - n)); - r->d[3] = (r->d[3] >> n) + (r->d[4] << (32 - n)); - r->d[4] = (r->d[4] >> n) + (r->d[5] << (32 - n)); - r->d[5] = (r->d[5] >> n) + (r->d[6] << (32 - n)); - r->d[6] = (r->d[6] >> n) + (r->d[7] << (32 - n)); - r->d[7] = (r->d[7] >> n); - - rustsecp256k1_v0_9_2_scalar_verify(r); - return ret; -} - -static void rustsecp256k1_v0_9_2_scalar_split_128(rustsecp256k1_v0_9_2_scalar *r1, rustsecp256k1_v0_9_2_scalar *r2, const rustsecp256k1_v0_9_2_scalar *k) { - rustsecp256k1_v0_9_2_scalar_verify(k); +static void rustsecp256k1_v0_10_0_scalar_split_128(rustsecp256k1_v0_10_0_scalar *r1, rustsecp256k1_v0_10_0_scalar *r2, const rustsecp256k1_v0_10_0_scalar *k) { + SECP256K1_SCALAR_VERIFY(k); r1->d[0] = k->d[0]; r1->d[1] = k->d[1]; @@ -653,27 +679,27 @@ static void rustsecp256k1_v0_9_2_scalar_split_128(rustsecp256k1_v0_9_2_scalar *r r2->d[6] = 0; r2->d[7] = 0; - rustsecp256k1_v0_9_2_scalar_verify(r1); - rustsecp256k1_v0_9_2_scalar_verify(r2); + SECP256K1_SCALAR_VERIFY(r1); + SECP256K1_SCALAR_VERIFY(r2); } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_eq(const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b) { - rustsecp256k1_v0_9_2_scalar_verify(a); - 
rustsecp256k1_v0_9_2_scalar_verify(b); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_eq(const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b) { + SECP256K1_SCALAR_VERIFY(a); + SECP256K1_SCALAR_VERIFY(b); return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_scalar_mul_shift_var(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b, unsigned int shift) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_scalar_mul_shift_var(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b, unsigned int shift) { uint32_t l[16]; unsigned int shiftlimbs; unsigned int shiftlow; unsigned int shifthigh; - rustsecp256k1_v0_9_2_scalar_verify(a); - rustsecp256k1_v0_9_2_scalar_verify(b); + SECP256K1_SCALAR_VERIFY(a); + SECP256K1_SCALAR_VERIFY(b); VERIFY_CHECK(shift >= 256); - rustsecp256k1_v0_9_2_scalar_mul_512(l, a, b); + rustsecp256k1_v0_10_0_scalar_mul_512(l, a, b); shiftlimbs = shift >> 5; shiftlow = shift & 0x1F; shifthigh = 32 - shiftlow; @@ -685,15 +711,15 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_scalar_mul_shift_var(rustsecp2 r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0; - rustsecp256k1_v0_9_2_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1); + rustsecp256k1_v0_10_0_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1); - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_scalar_cmov(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_scalar_cmov(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, int flag) { uint32_t mask0, mask1; volatile int vflag = flag; - rustsecp256k1_v0_9_2_scalar_verify(a); + SECP256K1_SCALAR_VERIFY(a); SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d)); mask0 = vflag + ~((uint32_t)0); @@ -707,14 +733,14 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_scalar_cmov(rustsecp256k1_v0_9 r->d[6] = (r->d[6] & mask0) | (a->d[6] & mask1); r->d[7] = (r->d[7] & mask0) | (a->d[7] & mask1); - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -static void rustsecp256k1_v0_9_2_scalar_from_signed30(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_modinv32_signed30 *a) { +static void rustsecp256k1_v0_10_0_scalar_from_signed30(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_modinv32_signed30 *a) { const uint32_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4], a5 = a->v[5], a6 = a->v[6], a7 = a->v[7], a8 = a->v[8]; - /* The output from rustsecp256k1_v0_9_2_modinv32{_var} should be normalized to range [0,modulus), and + /* The output from rustsecp256k1_v0_10_0_modinv32{_var} should be normalized to range [0,modulus), and * have limbs in [0,2^30). The modulus is < 2^256, so the top limb must be below 2^(256-30*8). 
*/ VERIFY_CHECK(a0 >> 30 == 0); @@ -736,14 +762,14 @@ static void rustsecp256k1_v0_9_2_scalar_from_signed30(rustsecp256k1_v0_9_2_scala r->d[6] = a6 >> 12 | a7 << 18; r->d[7] = a7 >> 14 | a8 << 16; - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -static void rustsecp256k1_v0_9_2_scalar_to_signed30(rustsecp256k1_v0_9_2_modinv32_signed30 *r, const rustsecp256k1_v0_9_2_scalar *a) { +static void rustsecp256k1_v0_10_0_scalar_to_signed30(rustsecp256k1_v0_10_0_modinv32_signed30 *r, const rustsecp256k1_v0_10_0_scalar *a) { const uint32_t M30 = UINT32_MAX >> 2; const uint32_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3], a4 = a->d[4], a5 = a->d[5], a6 = a->d[6], a7 = a->d[7]; - rustsecp256k1_v0_9_2_scalar_verify(a); + SECP256K1_SCALAR_VERIFY(a); r->v[0] = a0 & M30; r->v[1] = (a0 >> 30 | a1 << 2) & M30; @@ -756,47 +782,43 @@ static void rustsecp256k1_v0_9_2_scalar_to_signed30(rustsecp256k1_v0_9_2_modinv3 r->v[8] = a7 >> 16; } -static const rustsecp256k1_v0_9_2_modinv32_modinfo rustsecp256k1_v0_9_2_const_modinfo_scalar = { +static const rustsecp256k1_v0_10_0_modinv32_modinfo rustsecp256k1_v0_10_0_const_modinfo_scalar = { {{0x10364141L, 0x3F497A33L, 0x348A03BBL, 0x2BB739ABL, -0x146L, 0, 0, 0, 65536}}, 0x2A774EC1L }; -static void rustsecp256k1_v0_9_2_scalar_inverse(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *x) { - rustsecp256k1_v0_9_2_modinv32_signed30 s; +static void rustsecp256k1_v0_10_0_scalar_inverse(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *x) { + rustsecp256k1_v0_10_0_modinv32_signed30 s; #ifdef VERIFY - int zero_in = rustsecp256k1_v0_9_2_scalar_is_zero(x); + int zero_in = rustsecp256k1_v0_10_0_scalar_is_zero(x); #endif - rustsecp256k1_v0_9_2_scalar_verify(x); + SECP256K1_SCALAR_VERIFY(x); - rustsecp256k1_v0_9_2_scalar_to_signed30(&s, x); - rustsecp256k1_v0_9_2_modinv32(&s, &rustsecp256k1_v0_9_2_const_modinfo_scalar); - rustsecp256k1_v0_9_2_scalar_from_signed30(r, &s); + rustsecp256k1_v0_10_0_scalar_to_signed30(&s, x); + rustsecp256k1_v0_10_0_modinv32(&s, &rustsecp256k1_v0_10_0_const_modinfo_scalar); + rustsecp256k1_v0_10_0_scalar_from_signed30(r, &s); - rustsecp256k1_v0_9_2_scalar_verify(r); -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_scalar_is_zero(r) == zero_in); -#endif + SECP256K1_SCALAR_VERIFY(r); + VERIFY_CHECK(rustsecp256k1_v0_10_0_scalar_is_zero(r) == zero_in); } -static void rustsecp256k1_v0_9_2_scalar_inverse_var(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *x) { - rustsecp256k1_v0_9_2_modinv32_signed30 s; +static void rustsecp256k1_v0_10_0_scalar_inverse_var(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *x) { + rustsecp256k1_v0_10_0_modinv32_signed30 s; #ifdef VERIFY - int zero_in = rustsecp256k1_v0_9_2_scalar_is_zero(x); + int zero_in = rustsecp256k1_v0_10_0_scalar_is_zero(x); #endif - rustsecp256k1_v0_9_2_scalar_verify(x); + SECP256K1_SCALAR_VERIFY(x); - rustsecp256k1_v0_9_2_scalar_to_signed30(&s, x); - rustsecp256k1_v0_9_2_modinv32_var(&s, &rustsecp256k1_v0_9_2_const_modinfo_scalar); - rustsecp256k1_v0_9_2_scalar_from_signed30(r, &s); + rustsecp256k1_v0_10_0_scalar_to_signed30(&s, x); + rustsecp256k1_v0_10_0_modinv32_var(&s, &rustsecp256k1_v0_10_0_const_modinfo_scalar); + rustsecp256k1_v0_10_0_scalar_from_signed30(r, &s); - rustsecp256k1_v0_9_2_scalar_verify(r); -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_scalar_is_zero(r) == zero_in); -#endif + SECP256K1_SCALAR_VERIFY(r); + VERIFY_CHECK(rustsecp256k1_v0_10_0_scalar_is_zero(r) == zero_in); } 
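/* What the modinv call computes, sketched with a textbook extended
 * Euclidean inverse over a small prime; the vendored code instead runs
 * the constant-time safegcd algorithm on a signed-limb representation.
 * inv_mod is an illustrative helper, not the library API. */
#include <stdint.h>
#include <assert.h>

static uint32_t inv_mod(uint32_t x, uint32_t n) {
    /* Returns x^-1 mod n for gcd(x, n) == 1. */
    int64_t t = 0, newt = 1;
    int64_t r = n, newr = x % n;
    while (newr != 0) {
        int64_t q = r / newr, tmp;
        tmp = t - q * newt; t = newt; newt = tmp;
        tmp = r - q * newr; r = newr; newr = tmp;
    }
    return (uint32_t)(t < 0 ? t + n : t);
}

int main(void) {
    const uint32_t n = 65521; /* a small prime */
    uint32_t x;
    for (x = 1; x < 1000; x++) {
        assert((uint64_t)x * inv_mod(x, n) % n == 1);
    }
    return 0;
}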
-SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_is_even(const rustsecp256k1_v0_9_2_scalar *a) { - rustsecp256k1_v0_9_2_scalar_verify(a); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_is_even(const rustsecp256k1_v0_10_0_scalar *a) { + SECP256K1_SCALAR_VERIFY(a); return !(a->d[0] & 1); } diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_impl.h index 4f786157d..96a2f29ea 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_impl.h @@ -24,21 +24,19 @@ #error "Please select wide multiplication implementation" #endif -static const rustsecp256k1_v0_9_2_scalar rustsecp256k1_v0_9_2_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); -static const rustsecp256k1_v0_9_2_scalar rustsecp256k1_v0_9_2_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); +static const rustsecp256k1_v0_10_0_scalar rustsecp256k1_v0_10_0_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); +static const rustsecp256k1_v0_10_0_scalar rustsecp256k1_v0_10_0_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); -static int rustsecp256k1_v0_9_2_scalar_set_b32_seckey(rustsecp256k1_v0_9_2_scalar *r, const unsigned char *bin) { +static int rustsecp256k1_v0_10_0_scalar_set_b32_seckey(rustsecp256k1_v0_10_0_scalar *r, const unsigned char *bin) { int overflow; - rustsecp256k1_v0_9_2_scalar_set_b32(r, bin, &overflow); + rustsecp256k1_v0_10_0_scalar_set_b32(r, bin, &overflow); - rustsecp256k1_v0_9_2_scalar_verify(r); - return (!overflow) & (!rustsecp256k1_v0_9_2_scalar_is_zero(r)); + SECP256K1_SCALAR_VERIFY(r); + return (!overflow) & (!rustsecp256k1_v0_10_0_scalar_is_zero(r)); } -static void rustsecp256k1_v0_9_2_scalar_verify(const rustsecp256k1_v0_9_2_scalar *r) { -#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_9_2_scalar_check_overflow(r) == 0); -#endif +static void rustsecp256k1_v0_10_0_scalar_verify(const rustsecp256k1_v0_10_0_scalar *r) { + VERIFY_CHECK(rustsecp256k1_v0_10_0_scalar_check_overflow(r) == 0); (void)r; } @@ -62,8 +60,8 @@ static void rustsecp256k1_v0_9_2_scalar_verify(const rustsecp256k1_v0_9_2_scalar * nontrivial to get full test coverage for the exhaustive tests. We therefore * (arbitrarily) set r2 = k + 5 (mod n) and r1 = k - r2 * lambda (mod n). 
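/* Recombination check for the exhaustive-test split described above: with
 * toy order n = 13 and lambda = 9 (a nontrivial cube root of unity mod 13,
 * chosen for illustration), r1 + r2*lambda == k (mod n) holds for the
 * arbitrary choice r2 = k + 5. */
#include <stdint.h>
#include <assert.h>

int main(void) {
    const uint32_t n = 13, lambda = 9;
    uint32_t k;
    assert(lambda * lambda % n * lambda % n == 1); /* lambda^3 == 1 (mod n) */
    for (k = 0; k < n; k++) {
        uint32_t r2 = (k + 5) % n;
        uint32_t r1 = (k + (n - r2) * lambda) % n; /* k - r2*lambda (mod n) */
        assert((r1 + r2 * lambda) % n == k);
    }
    return 0;
}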
*/ -static void rustsecp256k1_v0_9_2_scalar_split_lambda(rustsecp256k1_v0_9_2_scalar * SECP256K1_RESTRICT r1, rustsecp256k1_v0_9_2_scalar * SECP256K1_RESTRICT r2, const rustsecp256k1_v0_9_2_scalar * SECP256K1_RESTRICT k) { - rustsecp256k1_v0_9_2_scalar_verify(k); +static void rustsecp256k1_v0_10_0_scalar_split_lambda(rustsecp256k1_v0_10_0_scalar * SECP256K1_RESTRICT r1, rustsecp256k1_v0_10_0_scalar * SECP256K1_RESTRICT r2, const rustsecp256k1_v0_10_0_scalar * SECP256K1_RESTRICT k) { + SECP256K1_SCALAR_VERIFY(k); VERIFY_CHECK(r1 != k); VERIFY_CHECK(r2 != k); VERIFY_CHECK(r1 != r2); @@ -71,20 +69,20 @@ static void rustsecp256k1_v0_9_2_scalar_split_lambda(rustsecp256k1_v0_9_2_scalar *r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER; *r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER; - rustsecp256k1_v0_9_2_scalar_verify(r1); - rustsecp256k1_v0_9_2_scalar_verify(r2); + SECP256K1_SCALAR_VERIFY(r1); + SECP256K1_SCALAR_VERIFY(r2); } #else /** * The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where * lambda is: */ -static const rustsecp256k1_v0_9_2_scalar rustsecp256k1_v0_9_2_const_lambda = SECP256K1_SCALAR_CONST( +static const rustsecp256k1_v0_10_0_scalar rustsecp256k1_v0_10_0_const_lambda = SECP256K1_SCALAR_CONST( 0x5363AD4CUL, 0xC05C30E0UL, 0xA5261C02UL, 0x8812645AUL, 0x122E22EAUL, 0x20816678UL, 0xDF02967CUL, 0x1B23BD72UL ); #ifdef VERIFY -static void rustsecp256k1_v0_9_2_scalar_split_lambda_verify(const rustsecp256k1_v0_9_2_scalar *r1, const rustsecp256k1_v0_9_2_scalar *r2, const rustsecp256k1_v0_9_2_scalar *k); +static void rustsecp256k1_v0_10_0_scalar_split_lambda_verify(const rustsecp256k1_v0_10_0_scalar *r1, const rustsecp256k1_v0_10_0_scalar *r2, const rustsecp256k1_v0_10_0_scalar *k); #endif /* @@ -137,49 +135,49 @@ static void rustsecp256k1_v0_9_2_scalar_split_lambda_verify(const rustsecp256k1_ * * See proof below. 
*/ -static void rustsecp256k1_v0_9_2_scalar_split_lambda(rustsecp256k1_v0_9_2_scalar * SECP256K1_RESTRICT r1, rustsecp256k1_v0_9_2_scalar * SECP256K1_RESTRICT r2, const rustsecp256k1_v0_9_2_scalar * SECP256K1_RESTRICT k) { - rustsecp256k1_v0_9_2_scalar c1, c2; - static const rustsecp256k1_v0_9_2_scalar minus_b1 = SECP256K1_SCALAR_CONST( +static void rustsecp256k1_v0_10_0_scalar_split_lambda(rustsecp256k1_v0_10_0_scalar * SECP256K1_RESTRICT r1, rustsecp256k1_v0_10_0_scalar * SECP256K1_RESTRICT r2, const rustsecp256k1_v0_10_0_scalar * SECP256K1_RESTRICT k) { + rustsecp256k1_v0_10_0_scalar c1, c2; + static const rustsecp256k1_v0_10_0_scalar minus_b1 = SECP256K1_SCALAR_CONST( 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL, 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL ); - static const rustsecp256k1_v0_9_2_scalar minus_b2 = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_10_0_scalar minus_b2 = SECP256K1_SCALAR_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL ); - static const rustsecp256k1_v0_9_2_scalar g1 = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_10_0_scalar g1 = SECP256K1_SCALAR_CONST( 0x3086D221UL, 0xA7D46BCDUL, 0xE86C90E4UL, 0x9284EB15UL, 0x3DAA8A14UL, 0x71E8CA7FUL, 0xE893209AUL, 0x45DBB031UL ); - static const rustsecp256k1_v0_9_2_scalar g2 = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_10_0_scalar g2 = SECP256K1_SCALAR_CONST( 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL, 0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL ); - rustsecp256k1_v0_9_2_scalar_verify(k); + SECP256K1_SCALAR_VERIFY(k); VERIFY_CHECK(r1 != k); VERIFY_CHECK(r2 != k); VERIFY_CHECK(r1 != r2); /* these _var calls are constant time since the shift amount is constant */ - rustsecp256k1_v0_9_2_scalar_mul_shift_var(&c1, k, &g1, 384); - rustsecp256k1_v0_9_2_scalar_mul_shift_var(&c2, k, &g2, 384); - rustsecp256k1_v0_9_2_scalar_mul(&c1, &c1, &minus_b1); - rustsecp256k1_v0_9_2_scalar_mul(&c2, &c2, &minus_b2); - rustsecp256k1_v0_9_2_scalar_add(r2, &c1, &c2); - rustsecp256k1_v0_9_2_scalar_mul(r1, r2, &rustsecp256k1_v0_9_2_const_lambda); - rustsecp256k1_v0_9_2_scalar_negate(r1, r1); - rustsecp256k1_v0_9_2_scalar_add(r1, r1, k); + rustsecp256k1_v0_10_0_scalar_mul_shift_var(&c1, k, &g1, 384); + rustsecp256k1_v0_10_0_scalar_mul_shift_var(&c2, k, &g2, 384); + rustsecp256k1_v0_10_0_scalar_mul(&c1, &c1, &minus_b1); + rustsecp256k1_v0_10_0_scalar_mul(&c2, &c2, &minus_b2); + rustsecp256k1_v0_10_0_scalar_add(r2, &c1, &c2); + rustsecp256k1_v0_10_0_scalar_mul(r1, r2, &rustsecp256k1_v0_10_0_const_lambda); + rustsecp256k1_v0_10_0_scalar_negate(r1, r1); + rustsecp256k1_v0_10_0_scalar_add(r1, r1, k); - rustsecp256k1_v0_9_2_scalar_verify(r1); - rustsecp256k1_v0_9_2_scalar_verify(r2); + SECP256K1_SCALAR_VERIFY(r1); + SECP256K1_SCALAR_VERIFY(r2); #ifdef VERIFY - rustsecp256k1_v0_9_2_scalar_split_lambda_verify(r1, r2, k); + rustsecp256k1_v0_10_0_scalar_split_lambda_verify(r1, r2, k); #endif } #ifdef VERIFY /* - * Proof for rustsecp256k1_v0_9_2_scalar_split_lambda's bounds. + * Proof for rustsecp256k1_v0_10_0_scalar_split_lambda's bounds. * * Let * - epsilon1 = 2^256 * |g1/2^384 - b2/d| @@ -282,8 +280,8 @@ static void rustsecp256k1_v0_9_2_scalar_split_lambda(rustsecp256k1_v0_9_2_scalar * * Q.E.D. 
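/* The rounding performed by the mul_shift_var calls above (shift right,
 * then conditionally add bit shift-1 of the product) rounds the quotient
 * to nearest; a toy 32-bit sketch, with mul_shift_round as an
 * illustrative helper. */
#include <stdint.h>
#include <assert.h>

static uint32_t mul_shift_round(uint32_t a, uint32_t b, unsigned int shift) {
    uint64_t l = (uint64_t)a * b;
    return (uint32_t)((l >> shift) + ((l >> (shift - 1)) & 1));
}

int main(void) {
    /* 7*9/32 = 1.96875 -> 2; 5*6/32 = 0.9375 -> 1. */
    assert(mul_shift_round(7, 9, 5) == 2);
    assert(mul_shift_round(5, 6, 5) == 1);
    assert(mul_shift_round(4, 4, 5) == 1); /* exactly 0.5 rounds up */
    return 0;
}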
*/ -static void rustsecp256k1_v0_9_2_scalar_split_lambda_verify(const rustsecp256k1_v0_9_2_scalar *r1, const rustsecp256k1_v0_9_2_scalar *r2, const rustsecp256k1_v0_9_2_scalar *k) { - rustsecp256k1_v0_9_2_scalar s; +static void rustsecp256k1_v0_10_0_scalar_split_lambda_verify(const rustsecp256k1_v0_10_0_scalar *r1, const rustsecp256k1_v0_10_0_scalar *r2, const rustsecp256k1_v0_10_0_scalar *k) { + rustsecp256k1_v0_10_0_scalar s; unsigned char buf1[32]; unsigned char buf2[32]; @@ -299,19 +297,19 @@ static void rustsecp256k1_v0_9_2_scalar_split_lambda_verify(const rustsecp256k1_ 0x8a, 0x65, 0x28, 0x7b, 0xd4, 0x71, 0x79, 0xfb, 0x2b, 0xe0, 0x88, 0x46, 0xce, 0xa2, 0x67, 0xed }; - rustsecp256k1_v0_9_2_scalar_mul(&s, &rustsecp256k1_v0_9_2_const_lambda, r2); - rustsecp256k1_v0_9_2_scalar_add(&s, &s, r1); - VERIFY_CHECK(rustsecp256k1_v0_9_2_scalar_eq(&s, k)); + rustsecp256k1_v0_10_0_scalar_mul(&s, &rustsecp256k1_v0_10_0_const_lambda, r2); + rustsecp256k1_v0_10_0_scalar_add(&s, &s, r1); + VERIFY_CHECK(rustsecp256k1_v0_10_0_scalar_eq(&s, k)); - rustsecp256k1_v0_9_2_scalar_negate(&s, r1); - rustsecp256k1_v0_9_2_scalar_get_b32(buf1, r1); - rustsecp256k1_v0_9_2_scalar_get_b32(buf2, &s); - VERIFY_CHECK(rustsecp256k1_v0_9_2_memcmp_var(buf1, k1_bound, 32) < 0 || rustsecp256k1_v0_9_2_memcmp_var(buf2, k1_bound, 32) < 0); + rustsecp256k1_v0_10_0_scalar_negate(&s, r1); + rustsecp256k1_v0_10_0_scalar_get_b32(buf1, r1); + rustsecp256k1_v0_10_0_scalar_get_b32(buf2, &s); + VERIFY_CHECK(rustsecp256k1_v0_10_0_memcmp_var(buf1, k1_bound, 32) < 0 || rustsecp256k1_v0_10_0_memcmp_var(buf2, k1_bound, 32) < 0); - rustsecp256k1_v0_9_2_scalar_negate(&s, r2); - rustsecp256k1_v0_9_2_scalar_get_b32(buf1, r2); - rustsecp256k1_v0_9_2_scalar_get_b32(buf2, &s); - VERIFY_CHECK(rustsecp256k1_v0_9_2_memcmp_var(buf1, k2_bound, 32) < 0 || rustsecp256k1_v0_9_2_memcmp_var(buf2, k2_bound, 32) < 0); + rustsecp256k1_v0_10_0_scalar_negate(&s, r2); + rustsecp256k1_v0_10_0_scalar_get_b32(buf1, r2); + rustsecp256k1_v0_10_0_scalar_get_b32(buf2, &s); + VERIFY_CHECK(rustsecp256k1_v0_10_0_memcmp_var(buf1, k2_bound, 32) < 0 || rustsecp256k1_v0_10_0_memcmp_var(buf2, k2_bound, 32) < 0); } #endif /* VERIFY */ #endif /* !defined(EXHAUSTIVE_TEST_ORDER) */ diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_low.h b/secp256k1-sys/depend/secp256k1/src/scalar_low.h index bc7eb309d..b5ad2a95b 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_low.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_low.h @@ -1,5 +1,5 @@ /*********************************************************************** - * Copyright (c) 2015 Andrew Poelstra * + * Copyright (c) 2015, 2022 Andrew Poelstra, Pieter Wuille * * Distributed under the MIT software license, see the accompanying * * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ @@ -10,8 +10,15 @@ #include /** A scalar modulo the group order of the secp256k1 curve. */ -typedef uint32_t rustsecp256k1_v0_9_2_scalar; +typedef uint32_t rustsecp256k1_v0_10_0_scalar; -#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) (d0) +/* A compile-time constant equal to 2^32 (modulo order). */ +#define SCALAR_2P32 ((0xffffffffUL % EXHAUSTIVE_TEST_ORDER) + 1U) + +/* Compute a*2^32 + b (modulo order). */ +#define SCALAR_HORNER(a, b) (((uint64_t)(a) * SCALAR_2P32 + (b)) % EXHAUSTIVE_TEST_ORDER) + +/* Evaluates to the provided 256-bit constant reduced modulo order. 
*/ +#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER((d7), (d6)), (d5)), (d4)), (d3)), (d2)), (d1)), (d0)) #endif /* SECP256K1_SCALAR_REPR_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h index c35b621e0..377ae6ddb 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h @@ -13,22 +13,22 @@ #include -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_is_even(const rustsecp256k1_v0_9_2_scalar *a) { - rustsecp256k1_v0_9_2_scalar_verify(a); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_is_even(const rustsecp256k1_v0_10_0_scalar *a) { + SECP256K1_SCALAR_VERIFY(a); return !(*a & 1); } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_scalar_clear(rustsecp256k1_v0_9_2_scalar *r) { *r = 0; } +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_scalar_clear(rustsecp256k1_v0_10_0_scalar *r) { *r = 0; } -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_scalar_set_int(rustsecp256k1_v0_9_2_scalar *r, unsigned int v) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_scalar_set_int(rustsecp256k1_v0_10_0_scalar *r, unsigned int v) { *r = v % EXHAUSTIVE_TEST_ORDER; - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_9_2_scalar_get_bits(const rustsecp256k1_v0_9_2_scalar *a, unsigned int offset, unsigned int count) { - rustsecp256k1_v0_9_2_scalar_verify(a); +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_10_0_scalar_get_bits(const rustsecp256k1_v0_10_0_scalar *a, unsigned int offset, unsigned int count) { + SECP256K1_SCALAR_VERIFY(a); if (offset < 32) return ((*a >> offset) & ((((uint32_t)1) << count) - 1)); @@ -36,39 +36,37 @@ SECP256K1_INLINE static unsigned int rustsecp256k1_v0_9_2_scalar_get_bits(const return 0; } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_9_2_scalar_get_bits_var(const rustsecp256k1_v0_9_2_scalar *a, unsigned int offset, unsigned int count) { - rustsecp256k1_v0_9_2_scalar_verify(a); +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_10_0_scalar_get_bits_var(const rustsecp256k1_v0_10_0_scalar *a, unsigned int offset, unsigned int count) { + SECP256K1_SCALAR_VERIFY(a); - return rustsecp256k1_v0_9_2_scalar_get_bits(a, offset, count); + return rustsecp256k1_v0_10_0_scalar_get_bits(a, offset, count); } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_check_overflow(const rustsecp256k1_v0_9_2_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; } +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_check_overflow(const rustsecp256k1_v0_10_0_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; } -static int rustsecp256k1_v0_9_2_scalar_add(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b) { - rustsecp256k1_v0_9_2_scalar_verify(a); - rustsecp256k1_v0_9_2_scalar_verify(b); +static int rustsecp256k1_v0_10_0_scalar_add(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b) { + SECP256K1_SCALAR_VERIFY(a); + SECP256K1_SCALAR_VERIFY(b); *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER; - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); return *r < *b; } -static void rustsecp256k1_v0_9_2_scalar_cadd_bit(rustsecp256k1_v0_9_2_scalar *r, unsigned int bit, int flag) { - rustsecp256k1_v0_9_2_scalar_verify(r); 
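/* Worked example of the SCALAR_HORNER folding defined above, with toy
 * order 13: 2^32 == 9 (mod 13), so each step multiplies the accumulator
 * by 9 and adds the next 32-bit word, all mod 13. ORDER, P32 and HORNER
 * are illustrative stand-ins for the macros in scalar_low.h. */
#include <stdint.h>
#include <assert.h>

#define ORDER 13u
#define P32 ((0xffffffffUL % ORDER) + 1U)              /* 2^32 mod ORDER = 9 */
#define HORNER(a, b) (((uint64_t)(a) * P32 + (b)) % ORDER)

int main(void) {
    /* The constant 1 (d7..d1 = 0, d0 = 1) reduces to 1. */
    assert(HORNER(HORNER(HORNER(HORNER(HORNER(HORNER(HORNER(0, 0), 0), 0), 0), 0), 0), 1) == 1);
    /* The constant 2^32 (d1 = 1, rest 0) reduces to 9. */
    assert(HORNER(HORNER(HORNER(HORNER(HORNER(HORNER(HORNER(0, 0), 0), 0), 0), 0), 1), 0) == 9);
    return 0;
}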
+static void rustsecp256k1_v0_10_0_scalar_cadd_bit(rustsecp256k1_v0_10_0_scalar *r, unsigned int bit, int flag) { + SECP256K1_SCALAR_VERIFY(r); if (flag && bit < 32) *r += ((uint32_t)1 << bit); - rustsecp256k1_v0_9_2_scalar_verify(r); -#ifdef VERIFY + SECP256K1_SCALAR_VERIFY(r); VERIFY_CHECK(bit < 32); /* Verify that adding (1 << bit) will not overflow any in-range scalar *r by overflowing the underlying uint32_t. */ VERIFY_CHECK(((uint32_t)1 << bit) - 1 <= UINT32_MAX - EXHAUSTIVE_TEST_ORDER); -#endif } -static void rustsecp256k1_v0_9_2_scalar_set_b32(rustsecp256k1_v0_9_2_scalar *r, const unsigned char *b32, int *overflow) { +static void rustsecp256k1_v0_10_0_scalar_set_b32(rustsecp256k1_v0_10_0_scalar *r, const unsigned char *b32, int *overflow) { int i; int over = 0; *r = 0; @@ -81,24 +79,24 @@ static void rustsecp256k1_v0_9_2_scalar_set_b32(rustsecp256k1_v0_9_2_scalar *r, } if (overflow) *overflow = over; - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -static void rustsecp256k1_v0_9_2_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_9_2_scalar* a) { - rustsecp256k1_v0_9_2_scalar_verify(a); +static void rustsecp256k1_v0_10_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_10_0_scalar* a) { + SECP256K1_SCALAR_VERIFY(a); memset(bin, 0, 32); bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a; } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_is_zero(const rustsecp256k1_v0_9_2_scalar *a) { - rustsecp256k1_v0_9_2_scalar_verify(a); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_is_zero(const rustsecp256k1_v0_10_0_scalar *a) { + SECP256K1_SCALAR_VERIFY(a); return *a == 0; } -static void rustsecp256k1_v0_9_2_scalar_negate(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a) { - rustsecp256k1_v0_9_2_scalar_verify(a); +static void rustsecp256k1_v0_10_0_scalar_negate(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a) { + SECP256K1_SCALAR_VERIFY(a); if (*a == 0) { *r = 0; @@ -106,103 +104,98 @@ static void rustsecp256k1_v0_9_2_scalar_negate(rustsecp256k1_v0_9_2_scalar *r, c *r = EXHAUSTIVE_TEST_ORDER - *a; } - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_is_one(const rustsecp256k1_v0_9_2_scalar *a) { - rustsecp256k1_v0_9_2_scalar_verify(a); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_is_one(const rustsecp256k1_v0_10_0_scalar *a) { + SECP256K1_SCALAR_VERIFY(a); return *a == 1; } -static int rustsecp256k1_v0_9_2_scalar_is_high(const rustsecp256k1_v0_9_2_scalar *a) { - rustsecp256k1_v0_9_2_scalar_verify(a); +static int rustsecp256k1_v0_10_0_scalar_is_high(const rustsecp256k1_v0_10_0_scalar *a) { + SECP256K1_SCALAR_VERIFY(a); return *a > EXHAUSTIVE_TEST_ORDER / 2; } -static int rustsecp256k1_v0_9_2_scalar_cond_negate(rustsecp256k1_v0_9_2_scalar *r, int flag) { - rustsecp256k1_v0_9_2_scalar_verify(r); +static int rustsecp256k1_v0_10_0_scalar_cond_negate(rustsecp256k1_v0_10_0_scalar *r, int flag) { + SECP256K1_SCALAR_VERIFY(r); - if (flag) rustsecp256k1_v0_9_2_scalar_negate(r, r); + if (flag) rustsecp256k1_v0_10_0_scalar_negate(r, r); - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); return flag ? 
-1 : 1; } -static void rustsecp256k1_v0_9_2_scalar_mul(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b) { - rustsecp256k1_v0_9_2_scalar_verify(a); - rustsecp256k1_v0_9_2_scalar_verify(b); +static void rustsecp256k1_v0_10_0_scalar_mul(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b) { + SECP256K1_SCALAR_VERIFY(a); + SECP256K1_SCALAR_VERIFY(b); *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER; - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -static int rustsecp256k1_v0_9_2_scalar_shr_int(rustsecp256k1_v0_9_2_scalar *r, int n) { - int ret; - rustsecp256k1_v0_9_2_scalar_verify(r); - VERIFY_CHECK(n > 0); - VERIFY_CHECK(n < 16); - - ret = *r & ((1 << n) - 1); - *r >>= n; - - rustsecp256k1_v0_9_2_scalar_verify(r); - return ret; -} - -static void rustsecp256k1_v0_9_2_scalar_split_128(rustsecp256k1_v0_9_2_scalar *r1, rustsecp256k1_v0_9_2_scalar *r2, const rustsecp256k1_v0_9_2_scalar *a) { - rustsecp256k1_v0_9_2_scalar_verify(a); +static void rustsecp256k1_v0_10_0_scalar_split_128(rustsecp256k1_v0_10_0_scalar *r1, rustsecp256k1_v0_10_0_scalar *r2, const rustsecp256k1_v0_10_0_scalar *a) { + SECP256K1_SCALAR_VERIFY(a); *r1 = *a; *r2 = 0; - rustsecp256k1_v0_9_2_scalar_verify(r1); - rustsecp256k1_v0_9_2_scalar_verify(r2); + SECP256K1_SCALAR_VERIFY(r1); + SECP256K1_SCALAR_VERIFY(r2); } -SECP256K1_INLINE static int rustsecp256k1_v0_9_2_scalar_eq(const rustsecp256k1_v0_9_2_scalar *a, const rustsecp256k1_v0_9_2_scalar *b) { - rustsecp256k1_v0_9_2_scalar_verify(a); - rustsecp256k1_v0_9_2_scalar_verify(b); +SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_eq(const rustsecp256k1_v0_10_0_scalar *a, const rustsecp256k1_v0_10_0_scalar *b) { + SECP256K1_SCALAR_VERIFY(a); + SECP256K1_SCALAR_VERIFY(b); return *a == *b; } -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_scalar_cmov(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_scalar_cmov(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a, int flag) { uint32_t mask0, mask1; volatile int vflag = flag; - rustsecp256k1_v0_9_2_scalar_verify(a); + SECP256K1_SCALAR_VERIFY(a); SECP256K1_CHECKMEM_CHECK_VERIFY(r, sizeof(*r)); mask0 = vflag + ~((uint32_t)0); mask1 = ~mask0; *r = (*r & mask0) | (*a & mask1); - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } -static void rustsecp256k1_v0_9_2_scalar_inverse(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *x) { +static void rustsecp256k1_v0_10_0_scalar_inverse(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *x) { int i; *r = 0; - rustsecp256k1_v0_9_2_scalar_verify(x); + SECP256K1_SCALAR_VERIFY(x); for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1) *r = i; - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus * have a composite group order; fix it in exhaustive_tests.c). 
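/* Toy version of the brute-force inverse used for the exhaustive tests:
 * search every residue for the one whose product with x is 1 (mod order).
 * Illustrative only; order 13 stands in for EXHAUSTIVE_TEST_ORDER. */
#include <stdint.h>
#include <assert.h>

int main(void) {
    const uint32_t n = 13;
    uint32_t x;
    for (x = 1; x < n; x++) {
        uint32_t i, r = 0;
        for (i = 0; i < n; i++) {
            if (i * x % n == 1) r = i;
        }
        /* For a prime order, every nonzero x has an inverse. */
        assert(r != 0 && r * x % n == 1);
    }
    return 0;
}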
*/ VERIFY_CHECK(*r != 0); } -static void rustsecp256k1_v0_9_2_scalar_inverse_var(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_scalar *x) { - rustsecp256k1_v0_9_2_scalar_verify(x); +static void rustsecp256k1_v0_10_0_scalar_inverse_var(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *x) { + SECP256K1_SCALAR_VERIFY(x); + + rustsecp256k1_v0_10_0_scalar_inverse(r, x); + + SECP256K1_SCALAR_VERIFY(r); +} + +static void rustsecp256k1_v0_10_0_scalar_half(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_scalar *a) { + SECP256K1_SCALAR_VERIFY(a); - rustsecp256k1_v0_9_2_scalar_inverse(r, x); + *r = (*a + ((-(uint32_t)(*a & 1)) & EXHAUSTIVE_TEST_ORDER)) >> 1; - rustsecp256k1_v0_9_2_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } #endif /* SECP256K1_SCALAR_REPR_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/scratch.h b/secp256k1-sys/depend/secp256k1/src/scratch.h index 66cee4ecf..7dc178804 100644 --- a/secp256k1-sys/depend/secp256k1/src/scratch.h +++ b/secp256k1-sys/depend/secp256k1/src/scratch.h @@ -9,7 +9,7 @@ /* The typedef is used internally; the struct name is used in the public API * (where it is exposed as a different typedef) */ -typedef struct rustsecp256k1_v0_9_2_scratch_space_struct { +typedef struct rustsecp256k1_v0_10_0_scratch_space_struct { /** guard against interpreting this object as other types */ unsigned char magic[8]; /** actual allocated data */ @@ -19,24 +19,24 @@ typedef struct rustsecp256k1_v0_9_2_scratch_space_struct { size_t alloc_size; /** maximum size available to allocate */ size_t max_size; -} rustsecp256k1_v0_9_2_scratch; +} rustsecp256k1_v0_10_0_scratch; -static rustsecp256k1_v0_9_2_scratch* rustsecp256k1_v0_9_2_scratch_create(const rustsecp256k1_v0_9_2_callback* error_callback, size_t max_size); +static rustsecp256k1_v0_10_0_scratch* rustsecp256k1_v0_10_0_scratch_create(const rustsecp256k1_v0_10_0_callback* error_callback, size_t max_size); -static void rustsecp256k1_v0_9_2_scratch_destroy(const rustsecp256k1_v0_9_2_callback* error_callback, rustsecp256k1_v0_9_2_scratch* scratch); +static void rustsecp256k1_v0_10_0_scratch_destroy(const rustsecp256k1_v0_10_0_callback* error_callback, rustsecp256k1_v0_10_0_scratch* scratch); /** Returns an opaque object used to "checkpoint" a scratch space. Used - * with `rustsecp256k1_v0_9_2_scratch_apply_checkpoint` to undo allocations. */ -static size_t rustsecp256k1_v0_9_2_scratch_checkpoint(const rustsecp256k1_v0_9_2_callback* error_callback, const rustsecp256k1_v0_9_2_scratch* scratch); + * with `rustsecp256k1_v0_10_0_scratch_apply_checkpoint` to undo allocations. */ +static size_t rustsecp256k1_v0_10_0_scratch_checkpoint(const rustsecp256k1_v0_10_0_callback* error_callback, const rustsecp256k1_v0_10_0_scratch* scratch); -/** Applies a check point received from `rustsecp256k1_v0_9_2_scratch_checkpoint`, +/** Applies a check point received from `rustsecp256k1_v0_10_0_scratch_checkpoint`, * undoing all allocations since that point. 
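The new `scalar_half` above halves a scalar modulo the group order without branching: `-(a & 1)` is all-ones exactly when the value is odd, so the odd order is conditionally added to make the sum even before the right shift divides it exactly. A toy sketch of the same computation, where ORDER is a hypothetical stand-in for EXHAUSTIVE_TEST_ORDER (the technique requires an odd modulus):

    #include <stdint.h>
    #include <stdio.h>

    #define ORDER 13u  /* stand-in for EXHAUSTIVE_TEST_ORDER; must be odd */

    /* a/2 mod ORDER: the mask -(a & 1) selects ORDER only for odd a,
     * making a + ORDER even so that the shift is an exact division. */
    static uint32_t half_mod(uint32_t a) {
        return (a + ((-(uint32_t)(a & 1)) & ORDER)) >> 1;
    }

    int main(void) {
        uint32_t a;
        for (a = 0; a < ORDER; a++) {
            /* sanity check: doubling the half must give back a (mod ORDER) */
            printf("half(%2u) = %2u, doubled = %2u\n", (unsigned)a,
                   (unsigned)half_mod(a), (unsigned)((2 * half_mod(a)) % ORDER));
        }
        return 0;
    }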
*/ -static void rustsecp256k1_v0_9_2_scratch_apply_checkpoint(const rustsecp256k1_v0_9_2_callback* error_callback, rustsecp256k1_v0_9_2_scratch* scratch, size_t checkpoint); +static void rustsecp256k1_v0_10_0_scratch_apply_checkpoint(const rustsecp256k1_v0_10_0_callback* error_callback, rustsecp256k1_v0_10_0_scratch* scratch, size_t checkpoint); /** Returns the maximum allocation the scratch space will allow */ -static size_t rustsecp256k1_v0_9_2_scratch_max_allocation(const rustsecp256k1_v0_9_2_callback* error_callback, const rustsecp256k1_v0_9_2_scratch* scratch, size_t n_objects); +static size_t rustsecp256k1_v0_10_0_scratch_max_allocation(const rustsecp256k1_v0_10_0_callback* error_callback, const rustsecp256k1_v0_10_0_scratch* scratch, size_t n_objects); /** Returns a pointer into the most recently allocated frame, or NULL if there is insufficient available space */ -static void *rustsecp256k1_v0_9_2_scratch_alloc(const rustsecp256k1_v0_9_2_callback* error_callback, rustsecp256k1_v0_9_2_scratch* scratch, size_t n); +static void *rustsecp256k1_v0_10_0_scratch_alloc(const rustsecp256k1_v0_10_0_callback* error_callback, rustsecp256k1_v0_10_0_scratch* scratch, size_t n); #endif diff --git a/secp256k1-sys/depend/secp256k1/src/scratch_impl.h b/secp256k1-sys/depend/secp256k1/src/scratch_impl.h index 2686dac57..cfcb464a3 100644 --- a/secp256k1-sys/depend/secp256k1/src/scratch_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scratch_impl.h @@ -10,29 +10,29 @@ #include "util.h" #include "scratch.h" -static size_t rustsecp256k1_v0_9_2_scratch_checkpoint(const rustsecp256k1_v0_9_2_callback* error_callback, const rustsecp256k1_v0_9_2_scratch* scratch) { - if (rustsecp256k1_v0_9_2_memcmp_var(scratch->magic, "scratch", 8) != 0) { - rustsecp256k1_v0_9_2_callback_call(error_callback, "invalid scratch space"); +static size_t rustsecp256k1_v0_10_0_scratch_checkpoint(const rustsecp256k1_v0_10_0_callback* error_callback, const rustsecp256k1_v0_10_0_scratch* scratch) { + if (rustsecp256k1_v0_10_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_10_0_callback_call(error_callback, "invalid scratch space"); return 0; } return scratch->alloc_size; } -static void rustsecp256k1_v0_9_2_scratch_apply_checkpoint(const rustsecp256k1_v0_9_2_callback* error_callback, rustsecp256k1_v0_9_2_scratch* scratch, size_t checkpoint) { - if (rustsecp256k1_v0_9_2_memcmp_var(scratch->magic, "scratch", 8) != 0) { - rustsecp256k1_v0_9_2_callback_call(error_callback, "invalid scratch space"); +static void rustsecp256k1_v0_10_0_scratch_apply_checkpoint(const rustsecp256k1_v0_10_0_callback* error_callback, rustsecp256k1_v0_10_0_scratch* scratch, size_t checkpoint) { + if (rustsecp256k1_v0_10_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_10_0_callback_call(error_callback, "invalid scratch space"); return; } if (checkpoint > scratch->alloc_size) { - rustsecp256k1_v0_9_2_callback_call(error_callback, "invalid checkpoint"); + rustsecp256k1_v0_10_0_callback_call(error_callback, "invalid checkpoint"); return; } scratch->alloc_size = checkpoint; } -static size_t rustsecp256k1_v0_9_2_scratch_max_allocation(const rustsecp256k1_v0_9_2_callback* error_callback, const rustsecp256k1_v0_9_2_scratch* scratch, size_t objects) { - if (rustsecp256k1_v0_9_2_memcmp_var(scratch->magic, "scratch", 8) != 0) { - rustsecp256k1_v0_9_2_callback_call(error_callback, "invalid scratch space"); +static size_t rustsecp256k1_v0_10_0_scratch_max_allocation(const rustsecp256k1_v0_10_0_callback* error_callback, const 
rustsecp256k1_v0_10_0_scratch* scratch, size_t objects) { + if (rustsecp256k1_v0_10_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_10_0_callback_call(error_callback, "invalid scratch space"); return 0; } /* Ensure that multiplication will not wrap around */ @@ -45,7 +45,7 @@ static size_t rustsecp256k1_v0_9_2_scratch_max_allocation(const rustsecp256k1_v0 return scratch->max_size - scratch->alloc_size - objects * (ALIGNMENT - 1); } -static void *rustsecp256k1_v0_9_2_scratch_alloc(const rustsecp256k1_v0_9_2_callback* error_callback, rustsecp256k1_v0_9_2_scratch* scratch, size_t size) { +static void *rustsecp256k1_v0_10_0_scratch_alloc(const rustsecp256k1_v0_10_0_callback* error_callback, rustsecp256k1_v0_10_0_scratch* scratch, size_t size) { void *ret; size_t rounded_size; @@ -56,8 +56,8 @@ static void *rustsecp256k1_v0_9_2_scratch_alloc(const rustsecp256k1_v0_9_2_callb } size = rounded_size; - if (rustsecp256k1_v0_9_2_memcmp_var(scratch->magic, "scratch", 8) != 0) { - rustsecp256k1_v0_9_2_callback_call(error_callback, "invalid scratch space"); + if (rustsecp256k1_v0_10_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_10_0_callback_call(error_callback, "invalid scratch space"); return NULL; } diff --git a/secp256k1-sys/depend/secp256k1/src/secp256k1.c b/secp256k1-sys/depend/secp256k1/src/secp256k1.c index 30125c96e..7e8314ed9 100644 --- a/secp256k1-sys/depend/secp256k1/src/secp256k1.c +++ b/secp256k1-sys/depend/secp256k1/src/secp256k1.c @@ -43,64 +43,64 @@ #define ARG_CHECK(cond) do { \ if (EXPECT(!(cond), 0)) { \ - rustsecp256k1_v0_9_2_callback_call(&ctx->illegal_callback, #cond); \ + rustsecp256k1_v0_10_0_callback_call(&ctx->illegal_callback, #cond); \ return 0; \ } \ } while(0) #define ARG_CHECK_VOID(cond) do { \ if (EXPECT(!(cond), 0)) { \ - rustsecp256k1_v0_9_2_callback_call(&ctx->illegal_callback, #cond); \ + rustsecp256k1_v0_10_0_callback_call(&ctx->illegal_callback, #cond); \ return; \ } \ } while(0) /* Note that whenever you change the context struct, you must also change the * context_eq function. */ -struct rustsecp256k1_v0_9_2_context_struct { - rustsecp256k1_v0_9_2_ecmult_gen_context ecmult_gen_ctx; - rustsecp256k1_v0_9_2_callback illegal_callback; - rustsecp256k1_v0_9_2_callback error_callback; +struct rustsecp256k1_v0_10_0_context_struct { + rustsecp256k1_v0_10_0_ecmult_gen_context ecmult_gen_ctx; + rustsecp256k1_v0_10_0_callback illegal_callback; + rustsecp256k1_v0_10_0_callback error_callback; int declassify; }; -static const rustsecp256k1_v0_9_2_context rustsecp256k1_v0_9_2_context_static_ = { +static const rustsecp256k1_v0_10_0_context rustsecp256k1_v0_10_0_context_static_ = { { 0 }, - { rustsecp256k1_v0_9_2_default_illegal_callback_fn, 0 }, - { rustsecp256k1_v0_9_2_default_error_callback_fn, 0 }, + { rustsecp256k1_v0_10_0_default_illegal_callback_fn, 0 }, + { rustsecp256k1_v0_10_0_default_error_callback_fn, 0 }, 0 }; -const rustsecp256k1_v0_9_2_context *rustsecp256k1_v0_9_2_context_static = &rustsecp256k1_v0_9_2_context_static_; -const rustsecp256k1_v0_9_2_context *rustsecp256k1_v0_9_2_context_no_precomp = &rustsecp256k1_v0_9_2_context_static_; +const rustsecp256k1_v0_10_0_context *rustsecp256k1_v0_10_0_context_static = &rustsecp256k1_v0_10_0_context_static_; +const rustsecp256k1_v0_10_0_context *rustsecp256k1_v0_10_0_context_no_precomp = &rustsecp256k1_v0_10_0_context_static_; /* Helper function that determines if a context is proper, i.e., is not the static context or a copy thereof. 
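The scratch-space hunks above carry the checkpoint/rollback design through the rename unchanged: a checkpoint is simply the current `alloc_size`, and applying one truncates every allocation made after it was taken. A toy bump allocator illustrating that design (an illustration only, not the library's API):

    #include <stddef.h>
    #include <stdio.h>

    /* Toy bump allocator with checkpointing, modeled on the scratch space:
     * rollback is just restoring an earlier alloc_size. */
    typedef struct { unsigned char data[256]; size_t alloc_size; } toy_scratch;

    static size_t toy_checkpoint(const toy_scratch *s) { return s->alloc_size; }

    static void *toy_alloc(toy_scratch *s, size_t n) {
        if (n > sizeof(s->data) - s->alloc_size) return NULL;  /* out of space */
        void *p = s->data + s->alloc_size;
        s->alloc_size += n;
        return p;
    }

    static void toy_apply_checkpoint(toy_scratch *s, size_t cp) {
        if (cp <= s->alloc_size) s->alloc_size = cp;  /* drop newer allocations */
    }

    int main(void) {
        toy_scratch s = { {0}, 0 };
        size_t cp = toy_checkpoint(&s);
        (void)toy_alloc(&s, 100);
        printf("after alloc: %zu bytes used\n", s.alloc_size);     /* 100 */
        toy_apply_checkpoint(&s, cp);
        printf("after rollback: %zu bytes used\n", s.alloc_size);  /* 0 */
        return 0;
    }

The real implementation additionally validates a magic tag and rounds sizes up for alignment, as the hunks above show.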
* - * This is intended for "context" functions such as rustsecp256k1_v0_9_2_context_clone. Functions which need specific + * This is intended for "context" functions such as rustsecp256k1_v0_10_0_context_clone. Functions which need specific * features of a context should still check for these features directly. For example, a function that needs * ecmult_gen should directly check for the existence of the ecmult_gen context. */ -static int rustsecp256k1_v0_9_2_context_is_proper(const rustsecp256k1_v0_9_2_context* ctx) { - return rustsecp256k1_v0_9_2_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx); +static int rustsecp256k1_v0_10_0_context_is_proper(const rustsecp256k1_v0_10_0_context* ctx) { + return rustsecp256k1_v0_10_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx); } -void rustsecp256k1_v0_9_2_selftest(void) { - if (!rustsecp256k1_v0_9_2_selftest_passes()) { - rustsecp256k1_v0_9_2_callback_call(&default_error_callback, "self test failed"); +void rustsecp256k1_v0_10_0_selftest(void) { + if (!rustsecp256k1_v0_10_0_selftest_passes()) { + rustsecp256k1_v0_10_0_callback_call(&default_error_callback, "self test failed"); } } -size_t rustsecp256k1_v0_9_2_context_preallocated_size(unsigned int flags) { - size_t ret = sizeof(rustsecp256k1_v0_9_2_context); +size_t rustsecp256k1_v0_10_0_context_preallocated_size(unsigned int flags) { + size_t ret = sizeof(rustsecp256k1_v0_10_0_context); /* A return value of 0 is reserved as an indicator for errors when we call this function internally. */ VERIFY_CHECK(ret != 0); if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) { - rustsecp256k1_v0_9_2_callback_call(&default_illegal_callback, + rustsecp256k1_v0_10_0_callback_call(&default_illegal_callback, "Invalid flags"); return 0; } if (EXPECT(!SECP256K1_CHECKMEM_RUNNING() && (flags & SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY), 0)) { - rustsecp256k1_v0_9_2_callback_call(&default_illegal_callback, + rustsecp256k1_v0_10_0_callback_call(&default_illegal_callback, "Declassify flag requires running with memory checking"); return 0; } @@ -108,76 +108,76 @@ size_t rustsecp256k1_v0_9_2_context_preallocated_size(unsigned int flags) { return ret; } -size_t rustsecp256k1_v0_9_2_context_preallocated_clone_size(const rustsecp256k1_v0_9_2_context* ctx) { +size_t rustsecp256k1_v0_10_0_context_preallocated_clone_size(const rustsecp256k1_v0_10_0_context* ctx) { VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_9_2_context_is_proper(ctx)); - return sizeof(rustsecp256k1_v0_9_2_context); + ARG_CHECK(rustsecp256k1_v0_10_0_context_is_proper(ctx)); + return sizeof(rustsecp256k1_v0_10_0_context); } -rustsecp256k1_v0_9_2_context* rustsecp256k1_v0_9_2_context_preallocated_create(void* prealloc, unsigned int flags) { +rustsecp256k1_v0_10_0_context* rustsecp256k1_v0_10_0_context_preallocated_create(void* prealloc, unsigned int flags) { size_t prealloc_size; - rustsecp256k1_v0_9_2_context* ret; + rustsecp256k1_v0_10_0_context* ret; - rustsecp256k1_v0_9_2_selftest(); + rustsecp256k1_v0_10_0_selftest(); - prealloc_size = rustsecp256k1_v0_9_2_context_preallocated_size(flags); + prealloc_size = rustsecp256k1_v0_10_0_context_preallocated_size(flags); if (prealloc_size == 0) { return NULL; } VERIFY_CHECK(prealloc != NULL); - ret = (rustsecp256k1_v0_9_2_context*)prealloc; + ret = (rustsecp256k1_v0_10_0_context*)prealloc; ret->illegal_callback = default_illegal_callback; ret->error_callback = default_error_callback; - /* Flags have been checked by rustsecp256k1_v0_9_2_context_preallocated_size.
*/ + /* Flags have been checked by rustsecp256k1_v0_10_0_context_preallocated_size. */ VERIFY_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_CONTEXT); - rustsecp256k1_v0_9_2_ecmult_gen_context_build(&ret->ecmult_gen_ctx); + rustsecp256k1_v0_10_0_ecmult_gen_context_build(&ret->ecmult_gen_ctx); ret->declassify = !!(flags & SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY); return ret; } -rustsecp256k1_v0_9_2_context* rustsecp256k1_v0_9_2_context_preallocated_clone(const rustsecp256k1_v0_9_2_context* ctx, void* prealloc) { - rustsecp256k1_v0_9_2_context* ret; +rustsecp256k1_v0_10_0_context* rustsecp256k1_v0_10_0_context_preallocated_clone(const rustsecp256k1_v0_10_0_context* ctx, void* prealloc) { + rustsecp256k1_v0_10_0_context* ret; VERIFY_CHECK(ctx != NULL); ARG_CHECK(prealloc != NULL); - ARG_CHECK(rustsecp256k1_v0_9_2_context_is_proper(ctx)); + ARG_CHECK(rustsecp256k1_v0_10_0_context_is_proper(ctx)); - ret = (rustsecp256k1_v0_9_2_context*)prealloc; + ret = (rustsecp256k1_v0_10_0_context*)prealloc; *ret = *ctx; return ret; } -void rustsecp256k1_v0_9_2_context_preallocated_destroy(rustsecp256k1_v0_9_2_context* ctx) { - ARG_CHECK_VOID(ctx == NULL || rustsecp256k1_v0_9_2_context_is_proper(ctx)); +void rustsecp256k1_v0_10_0_context_preallocated_destroy(rustsecp256k1_v0_10_0_context* ctx) { + ARG_CHECK_VOID(ctx == NULL || rustsecp256k1_v0_10_0_context_is_proper(ctx)); /* Defined as noop */ if (ctx == NULL) { return; } - rustsecp256k1_v0_9_2_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); + rustsecp256k1_v0_10_0_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); } -void rustsecp256k1_v0_9_2_context_set_illegal_callback(rustsecp256k1_v0_9_2_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - /* We compare pointers instead of checking rustsecp256k1_v0_9_2_context_is_proper() here +void rustsecp256k1_v0_10_0_context_set_illegal_callback(rustsecp256k1_v0_10_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { + /* We compare pointers instead of checking rustsecp256k1_v0_10_0_context_is_proper() here because setting callbacks is allowed on *copies* of the static context: it's harmless and makes testing easier. */ - ARG_CHECK_VOID(ctx != rustsecp256k1_v0_9_2_context_static); + ARG_CHECK_VOID(ctx != rustsecp256k1_v0_10_0_context_static); if (fun == NULL) { - fun = rustsecp256k1_v0_9_2_default_illegal_callback_fn; + fun = rustsecp256k1_v0_10_0_default_illegal_callback_fn; } ctx->illegal_callback.fn = fun; ctx->illegal_callback.data = data; } -void rustsecp256k1_v0_9_2_context_set_error_callback(rustsecp256k1_v0_9_2_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - /* We compare pointers instead of checking rustsecp256k1_v0_9_2_context_is_proper() here +void rustsecp256k1_v0_10_0_context_set_error_callback(rustsecp256k1_v0_10_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { + /* We compare pointers instead of checking rustsecp256k1_v0_10_0_context_is_proper() here because setting callbacks is allowed on *copies* of the static context: it's harmless and makes testing easier. 
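The preallocated-context functions above are meant to be called in a fixed sequence: query the required size, hand in caller-owned memory, create the context in place, and destroy it before the caller releases the memory. A sketch of that sequence using the vendored names, with all error handling elided; `malloc` merely stands in for whatever allocation the caller controls:

    #include <stdlib.h>
    #include "secp256k1_preallocated.h"  /* include path depends on the build setup */

    /* Sketch only: real code must check size != 0, mem != NULL, ctx != NULL. */
    static void context_lifecycle(void) {
        size_t size = rustsecp256k1_v0_10_0_context_preallocated_size(SECP256K1_CONTEXT_NONE);
        void *mem = malloc(size);
        rustsecp256k1_v0_10_0_context *ctx =
            rustsecp256k1_v0_10_0_context_preallocated_create(mem, SECP256K1_CONTEXT_NONE);

        /* ... use ctx ... */

        rustsecp256k1_v0_10_0_context_preallocated_destroy(ctx);
        free(mem);  /* the library never frees caller-provided memory */
    }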
*/ - ARG_CHECK_VOID(ctx != rustsecp256k1_v0_9_2_context_static); + ARG_CHECK_VOID(ctx != rustsecp256k1_v0_10_0_context_static); if (fun == NULL) { - fun = rustsecp256k1_v0_9_2_default_error_callback_fn; + fun = rustsecp256k1_v0_10_0_default_error_callback_fn; } ctx->error_callback.fn = fun; ctx->error_callback.data = data; @@ -186,63 +186,63 @@ void rustsecp256k1_v0_9_2_context_set_error_callback(rustsecp256k1_v0_9_2_contex /* Mark memory as no-longer-secret for the purpose of analysing constant-time behaviour * of the software. */ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_declassify(const rustsecp256k1_v0_9_2_context* ctx, const void *p, size_t len) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_declassify(const rustsecp256k1_v0_10_0_context* ctx, const void *p, size_t len) { if (EXPECT(ctx->declassify, 0)) SECP256K1_CHECKMEM_DEFINE(p, len); } -static int rustsecp256k1_v0_9_2_pubkey_load(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_ge* ge, const rustsecp256k1_v0_9_2_pubkey* pubkey) { - if (sizeof(rustsecp256k1_v0_9_2_ge_storage) == 64) { - /* When the rustsecp256k1_v0_9_2_ge_storage type is exactly 64 byte, use its - * representation inside rustsecp256k1_v0_9_2_pubkey, as conversion is very fast. - * Note that rustsecp256k1_v0_9_2_pubkey_save must use the same representation. */ - rustsecp256k1_v0_9_2_ge_storage s; +static int rustsecp256k1_v0_10_0_pubkey_load(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_ge* ge, const rustsecp256k1_v0_10_0_pubkey* pubkey) { + if (sizeof(rustsecp256k1_v0_10_0_ge_storage) == 64) { + /* When the rustsecp256k1_v0_10_0_ge_storage type is exactly 64 byte, use its + * representation inside rustsecp256k1_v0_10_0_pubkey, as conversion is very fast. + * Note that rustsecp256k1_v0_10_0_pubkey_save must use the same representation. */ + rustsecp256k1_v0_10_0_ge_storage s; memcpy(&s, &pubkey->data[0], sizeof(s)); - rustsecp256k1_v0_9_2_ge_from_storage(ge, &s); + rustsecp256k1_v0_10_0_ge_from_storage(ge, &s); } else { /* Otherwise, fall back to 32-byte big endian for X and Y. 
*/ - rustsecp256k1_v0_9_2_fe x, y; - ARG_CHECK(rustsecp256k1_v0_9_2_fe_set_b32_limit(&x, pubkey->data)); - ARG_CHECK(rustsecp256k1_v0_9_2_fe_set_b32_limit(&y, pubkey->data + 32)); - rustsecp256k1_v0_9_2_ge_set_xy(ge, &x, &y); + rustsecp256k1_v0_10_0_fe x, y; + ARG_CHECK(rustsecp256k1_v0_10_0_fe_set_b32_limit(&x, pubkey->data)); + ARG_CHECK(rustsecp256k1_v0_10_0_fe_set_b32_limit(&y, pubkey->data + 32)); + rustsecp256k1_v0_10_0_ge_set_xy(ge, &x, &y); } - ARG_CHECK(!rustsecp256k1_v0_9_2_fe_is_zero(&ge->x)); + ARG_CHECK(!rustsecp256k1_v0_10_0_fe_is_zero(&ge->x)); return 1; } -static void rustsecp256k1_v0_9_2_pubkey_save(rustsecp256k1_v0_9_2_pubkey* pubkey, rustsecp256k1_v0_9_2_ge* ge) { - if (sizeof(rustsecp256k1_v0_9_2_ge_storage) == 64) { - rustsecp256k1_v0_9_2_ge_storage s; - rustsecp256k1_v0_9_2_ge_to_storage(&s, ge); +static void rustsecp256k1_v0_10_0_pubkey_save(rustsecp256k1_v0_10_0_pubkey* pubkey, rustsecp256k1_v0_10_0_ge* ge) { + if (sizeof(rustsecp256k1_v0_10_0_ge_storage) == 64) { + rustsecp256k1_v0_10_0_ge_storage s; + rustsecp256k1_v0_10_0_ge_to_storage(&s, ge); memcpy(&pubkey->data[0], &s, sizeof(s)); } else { - VERIFY_CHECK(!rustsecp256k1_v0_9_2_ge_is_infinity(ge)); - rustsecp256k1_v0_9_2_fe_normalize_var(&ge->x); - rustsecp256k1_v0_9_2_fe_normalize_var(&ge->y); - rustsecp256k1_v0_9_2_fe_get_b32(pubkey->data, &ge->x); - rustsecp256k1_v0_9_2_fe_get_b32(pubkey->data + 32, &ge->y); + VERIFY_CHECK(!rustsecp256k1_v0_10_0_ge_is_infinity(ge)); + rustsecp256k1_v0_10_0_fe_normalize_var(&ge->x); + rustsecp256k1_v0_10_0_fe_normalize_var(&ge->y); + rustsecp256k1_v0_10_0_fe_get_b32(pubkey->data, &ge->x); + rustsecp256k1_v0_10_0_fe_get_b32(pubkey->data + 32, &ge->y); } } -int rustsecp256k1_v0_9_2_ec_pubkey_parse(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_pubkey* pubkey, const unsigned char *input, size_t inputlen) { - rustsecp256k1_v0_9_2_ge Q; +int rustsecp256k1_v0_10_0_ec_pubkey_parse(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_pubkey* pubkey, const unsigned char *input, size_t inputlen) { + rustsecp256k1_v0_10_0_ge Q; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); ARG_CHECK(input != NULL); - if (!rustsecp256k1_v0_9_2_eckey_pubkey_parse(&Q, input, inputlen)) { + if (!rustsecp256k1_v0_10_0_eckey_pubkey_parse(&Q, input, inputlen)) { return 0; } - if (!rustsecp256k1_v0_9_2_ge_is_in_correct_subgroup(&Q)) { + if (!rustsecp256k1_v0_10_0_ge_is_in_correct_subgroup(&Q)) { return 0; } - rustsecp256k1_v0_9_2_pubkey_save(pubkey, &Q); - rustsecp256k1_v0_9_2_ge_clear(&Q); + rustsecp256k1_v0_10_0_pubkey_save(pubkey, &Q); + rustsecp256k1_v0_10_0_ge_clear(&Q); return 1; } -int rustsecp256k1_v0_9_2_ec_pubkey_serialize(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_9_2_pubkey* pubkey, unsigned int flags) { - rustsecp256k1_v0_9_2_ge Q; +int rustsecp256k1_v0_10_0_ec_pubkey_serialize(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_10_0_pubkey* pubkey, unsigned int flags) { + rustsecp256k1_v0_10_0_ge Q; size_t len; int ret = 0; @@ -255,8 +255,8 @@ int rustsecp256k1_v0_9_2_ec_pubkey_serialize(const rustsecp256k1_v0_9_2_context* memset(output, 0, len); ARG_CHECK(pubkey != NULL); ARG_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_COMPRESSION); - if (rustsecp256k1_v0_9_2_pubkey_load(ctx, &Q, pubkey)) { - ret = rustsecp256k1_v0_9_2_eckey_pubkey_serialize(&Q, output, &len, flags & 
SECP256K1_FLAGS_BIT_COMPRESSION); + if (rustsecp256k1_v0_10_0_pubkey_load(ctx, &Q, pubkey)) { + ret = rustsecp256k1_v0_10_0_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION); if (ret) { *outputlen = len; } @@ -264,9 +264,9 @@ int rustsecp256k1_v0_9_2_ec_pubkey_serialize(const rustsecp256k1_v0_9_2_context* return ret; } -int rustsecp256k1_v0_9_2_ec_pubkey_cmp(const rustsecp256k1_v0_9_2_context* ctx, const rustsecp256k1_v0_9_2_pubkey* pubkey0, const rustsecp256k1_v0_9_2_pubkey* pubkey1) { +int rustsecp256k1_v0_10_0_ec_pubkey_cmp(const rustsecp256k1_v0_10_0_context* ctx, const rustsecp256k1_v0_10_0_pubkey* pubkey0, const rustsecp256k1_v0_10_0_pubkey* pubkey1) { unsigned char out[2][33]; - const rustsecp256k1_v0_9_2_pubkey* pk[2]; + const rustsecp256k1_v0_10_0_pubkey* pk[2]; int i; VERIFY_CHECK(ctx != NULL); @@ -279,7 +279,7 @@ int rustsecp256k1_v0_9_2_ec_pubkey_cmp(const rustsecp256k1_v0_9_2_context* ctx, * results in consistent comparisons even if NULL or invalid pubkeys are * involved and prevents edge cases such as sorting algorithms that use * this function and do not terminate as a result. */ - if (!rustsecp256k1_v0_9_2_ec_pubkey_serialize(ctx, out[i], &out_size, pk[i], SECP256K1_EC_COMPRESSED)) { + if (!rustsecp256k1_v0_10_0_ec_pubkey_serialize(ctx, out[i], &out_size, pk[i], SECP256K1_EC_COMPRESSED)) { /* Note that ec_pubkey_serialize should already set the output to * zero in that case, but it's not guaranteed by the API, we can't * test it and writing a VERIFY_CHECK is more complex than @@ -287,42 +287,42 @@ int rustsecp256k1_v0_9_2_ec_pubkey_cmp(const rustsecp256k1_v0_9_2_context* ctx, memset(out[i], 0, sizeof(out[i])); } } - return rustsecp256k1_v0_9_2_memcmp_var(out[0], out[1], sizeof(out[0])); + return rustsecp256k1_v0_10_0_memcmp_var(out[0], out[1], sizeof(out[0])); } -static void rustsecp256k1_v0_9_2_ecdsa_signature_load(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_scalar* r, rustsecp256k1_v0_9_2_scalar* s, const rustsecp256k1_v0_9_2_ecdsa_signature* sig) { +static void rustsecp256k1_v0_10_0_ecdsa_signature_load(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_scalar* r, rustsecp256k1_v0_10_0_scalar* s, const rustsecp256k1_v0_10_0_ecdsa_signature* sig) { (void)ctx; - if (sizeof(rustsecp256k1_v0_9_2_scalar) == 32) { - /* When the rustsecp256k1_v0_9_2_scalar type is exactly 32 byte, use its - * representation inside rustsecp256k1_v0_9_2_ecdsa_signature, as conversion is very fast. - * Note that rustsecp256k1_v0_9_2_ecdsa_signature_save must use the same representation. */ + if (sizeof(rustsecp256k1_v0_10_0_scalar) == 32) { + /* When the rustsecp256k1_v0_10_0_scalar type is exactly 32 byte, use its + * representation inside rustsecp256k1_v0_10_0_ecdsa_signature, as conversion is very fast. + * Note that rustsecp256k1_v0_10_0_ecdsa_signature_save must use the same representation. 
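The comment in `ec_pubkey_cmp` above records the key design point: keys that fail to serialize are compared as an all-zero 33-byte string, so the ordering stays total and sorting algorithms built on it terminate even when fed invalid pubkeys. A self-contained sketch of that comparison strategy (illustration only):

    #include <stdio.h>
    #include <string.h>

    /* Compare two 33-byte compressed serializations, substituting zeros when
     * serialization failed. Because valid compressed keys begin with 0x02 or
     * 0x03, invalid keys sort consistently before every valid one. */
    static int cmp_serialized(const unsigned char *a, int a_ok,
                              const unsigned char *b, int b_ok) {
        unsigned char za[33] = {0}, zb[33] = {0};
        if (a_ok) memcpy(za, a, 33);
        if (b_ok) memcpy(zb, b, 33);
        return memcmp(za, zb, 33);
    }

    int main(void) {
        unsigned char k1[33] = {0x02, 0xaa}, k2[33] = {0x03, 0xbb};
        printf("%d\n", cmp_serialized(k1, 1, k2, 1) < 0);  /* 1: 0x02... < 0x03... */
        printf("%d\n", cmp_serialized(k1, 0, k2, 1) < 0);  /* 1: invalid sorts first */
        return 0;
    }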
*/ memcpy(r, &sig->data[0], 32); memcpy(s, &sig->data[32], 32); } else { - rustsecp256k1_v0_9_2_scalar_set_b32(r, &sig->data[0], NULL); - rustsecp256k1_v0_9_2_scalar_set_b32(s, &sig->data[32], NULL); + rustsecp256k1_v0_10_0_scalar_set_b32(r, &sig->data[0], NULL); + rustsecp256k1_v0_10_0_scalar_set_b32(s, &sig->data[32], NULL); } } -static void rustsecp256k1_v0_9_2_ecdsa_signature_save(rustsecp256k1_v0_9_2_ecdsa_signature* sig, const rustsecp256k1_v0_9_2_scalar* r, const rustsecp256k1_v0_9_2_scalar* s) { - if (sizeof(rustsecp256k1_v0_9_2_scalar) == 32) { +static void rustsecp256k1_v0_10_0_ecdsa_signature_save(rustsecp256k1_v0_10_0_ecdsa_signature* sig, const rustsecp256k1_v0_10_0_scalar* r, const rustsecp256k1_v0_10_0_scalar* s) { + if (sizeof(rustsecp256k1_v0_10_0_scalar) == 32) { memcpy(&sig->data[0], r, 32); memcpy(&sig->data[32], s, 32); } else { - rustsecp256k1_v0_9_2_scalar_get_b32(&sig->data[0], r); - rustsecp256k1_v0_9_2_scalar_get_b32(&sig->data[32], s); + rustsecp256k1_v0_10_0_scalar_get_b32(&sig->data[0], r); + rustsecp256k1_v0_10_0_scalar_get_b32(&sig->data[32], s); } } -int rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { - rustsecp256k1_v0_9_2_scalar r, s; +int rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { + rustsecp256k1_v0_10_0_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(input != NULL); - if (rustsecp256k1_v0_9_2_ecdsa_sig_parse(&r, &s, input, inputlen)) { - rustsecp256k1_v0_9_2_ecdsa_signature_save(sig, &r, &s); + if (rustsecp256k1_v0_10_0_ecdsa_sig_parse(&r, &s, input, inputlen)) { + rustsecp256k1_v0_10_0_ecdsa_signature_save(sig, &r, &s); return 1; } else { memset(sig, 0, sizeof(*sig)); @@ -330,8 +330,8 @@ int rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(const rustsecp256k1_v0_9_2_co } } -int rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_ecdsa_signature* sig, const unsigned char *input64) { - rustsecp256k1_v0_9_2_scalar r, s; +int rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_ecdsa_signature* sig, const unsigned char *input64) { + rustsecp256k1_v0_10_0_scalar r, s; int ret = 1; int overflow = 0; @@ -339,76 +339,76 @@ int rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact(const rustsecp256k1_v0_9_ ARG_CHECK(sig != NULL); ARG_CHECK(input64 != NULL); - rustsecp256k1_v0_9_2_scalar_set_b32(&r, &input64[0], &overflow); + rustsecp256k1_v0_10_0_scalar_set_b32(&r, &input64[0], &overflow); ret &= !overflow; - rustsecp256k1_v0_9_2_scalar_set_b32(&s, &input64[32], &overflow); + rustsecp256k1_v0_10_0_scalar_set_b32(&s, &input64[32], &overflow); ret &= !overflow; if (ret) { - rustsecp256k1_v0_9_2_ecdsa_signature_save(sig, &r, &s); + rustsecp256k1_v0_10_0_ecdsa_signature_save(sig, &r, &s); } else { memset(sig, 0, sizeof(*sig)); } return ret; } -int rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_9_2_ecdsa_signature* sig) { - rustsecp256k1_v0_9_2_scalar r, s; +int rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_10_0_ecdsa_signature* sig) { + 
rustsecp256k1_v0_10_0_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output != NULL); ARG_CHECK(outputlen != NULL); ARG_CHECK(sig != NULL); - rustsecp256k1_v0_9_2_ecdsa_signature_load(ctx, &r, &s, sig); - return rustsecp256k1_v0_9_2_ecdsa_sig_serialize(output, outputlen, &r, &s); + rustsecp256k1_v0_10_0_ecdsa_signature_load(ctx, &r, &s, sig); + return rustsecp256k1_v0_10_0_ecdsa_sig_serialize(output, outputlen, &r, &s); } -int rustsecp256k1_v0_9_2_ecdsa_signature_serialize_compact(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *output64, const rustsecp256k1_v0_9_2_ecdsa_signature* sig) { - rustsecp256k1_v0_9_2_scalar r, s; +int rustsecp256k1_v0_10_0_ecdsa_signature_serialize_compact(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *output64, const rustsecp256k1_v0_10_0_ecdsa_signature* sig) { + rustsecp256k1_v0_10_0_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output64 != NULL); ARG_CHECK(sig != NULL); - rustsecp256k1_v0_9_2_ecdsa_signature_load(ctx, &r, &s, sig); - rustsecp256k1_v0_9_2_scalar_get_b32(&output64[0], &r); - rustsecp256k1_v0_9_2_scalar_get_b32(&output64[32], &s); + rustsecp256k1_v0_10_0_ecdsa_signature_load(ctx, &r, &s, sig); + rustsecp256k1_v0_10_0_scalar_get_b32(&output64[0], &r); + rustsecp256k1_v0_10_0_scalar_get_b32(&output64[32], &s); return 1; } -int rustsecp256k1_v0_9_2_ecdsa_signature_normalize(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_ecdsa_signature *sigout, const rustsecp256k1_v0_9_2_ecdsa_signature *sigin) { - rustsecp256k1_v0_9_2_scalar r, s; +int rustsecp256k1_v0_10_0_ecdsa_signature_normalize(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_ecdsa_signature *sigout, const rustsecp256k1_v0_10_0_ecdsa_signature *sigin) { + rustsecp256k1_v0_10_0_scalar r, s; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(sigin != NULL); - rustsecp256k1_v0_9_2_ecdsa_signature_load(ctx, &r, &s, sigin); - ret = rustsecp256k1_v0_9_2_scalar_is_high(&s); + rustsecp256k1_v0_10_0_ecdsa_signature_load(ctx, &r, &s, sigin); + ret = rustsecp256k1_v0_10_0_scalar_is_high(&s); if (sigout != NULL) { if (ret) { - rustsecp256k1_v0_9_2_scalar_negate(&s, &s); + rustsecp256k1_v0_10_0_scalar_negate(&s, &s); } - rustsecp256k1_v0_9_2_ecdsa_signature_save(sigout, &r, &s); + rustsecp256k1_v0_10_0_ecdsa_signature_save(sigout, &r, &s); } return ret; } -int rustsecp256k1_v0_9_2_ecdsa_verify(const rustsecp256k1_v0_9_2_context* ctx, const rustsecp256k1_v0_9_2_ecdsa_signature *sig, const unsigned char *msghash32, const rustsecp256k1_v0_9_2_pubkey *pubkey) { - rustsecp256k1_v0_9_2_ge q; - rustsecp256k1_v0_9_2_scalar r, s; - rustsecp256k1_v0_9_2_scalar m; +int rustsecp256k1_v0_10_0_ecdsa_verify(const rustsecp256k1_v0_10_0_context* ctx, const rustsecp256k1_v0_10_0_ecdsa_signature *sig, const unsigned char *msghash32, const rustsecp256k1_v0_10_0_pubkey *pubkey) { + rustsecp256k1_v0_10_0_ge q; + rustsecp256k1_v0_10_0_scalar r, s; + rustsecp256k1_v0_10_0_scalar m; VERIFY_CHECK(ctx != NULL); ARG_CHECK(msghash32 != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(pubkey != NULL); - rustsecp256k1_v0_9_2_scalar_set_b32(&m, msghash32, NULL); - rustsecp256k1_v0_9_2_ecdsa_signature_load(ctx, &r, &s, sig); - return (!rustsecp256k1_v0_9_2_scalar_is_high(&s) && - rustsecp256k1_v0_9_2_pubkey_load(ctx, &q, pubkey) && - rustsecp256k1_v0_9_2_ecdsa_sig_verify(&r, &s, &q, &m)); + rustsecp256k1_v0_10_0_scalar_set_b32(&m, msghash32, NULL); + rustsecp256k1_v0_10_0_ecdsa_signature_load(ctx, &r, &s, sig); + return (!rustsecp256k1_v0_10_0_scalar_is_high(&s) && + 
rustsecp256k1_v0_10_0_pubkey_load(ctx, &q, pubkey) && + rustsecp256k1_v0_10_0_ecdsa_sig_verify(&r, &s, &q, &m)); } static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *offset, const void *data, unsigned int len) { @@ -419,12 +419,12 @@ static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *off static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { unsigned char keydata[112]; unsigned int offset = 0; - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256 rng; unsigned int i; - rustsecp256k1_v0_9_2_scalar msg; + rustsecp256k1_v0_10_0_scalar msg; unsigned char msgmod32[32]; - rustsecp256k1_v0_9_2_scalar_set_b32(&msg, msg32, NULL); - rustsecp256k1_v0_9_2_scalar_get_b32(msgmod32, &msg); + rustsecp256k1_v0_10_0_scalar_set_b32(&msg, msg32, NULL); + rustsecp256k1_v0_10_0_scalar_get_b32(msgmod32, &msg); /* We feed a byte array to the PRNG as input, consisting of: * - the private key (32 bytes) and reduced message (32 bytes), see RFC 6979 3.2d. * - optionally 32 extra bytes of data, see RFC 6979 3.6 Additional Data. @@ -441,51 +441,51 @@ static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *m if (algo16 != NULL) { buffer_append(keydata, &offset, algo16, 16); } - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_initialize(&rng, keydata, offset); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_initialize(&rng, keydata, offset); memset(keydata, 0, sizeof(keydata)); for (i = 0; i <= counter; i++) { - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); } - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_finalize(&rng); return 1; } -const rustsecp256k1_v0_9_2_nonce_function rustsecp256k1_v0_9_2_nonce_function_rfc6979 = nonce_function_rfc6979; -const rustsecp256k1_v0_9_2_nonce_function rustsecp256k1_v0_9_2_nonce_function_default = nonce_function_rfc6979; +const rustsecp256k1_v0_10_0_nonce_function rustsecp256k1_v0_10_0_nonce_function_rfc6979 = nonce_function_rfc6979; +const rustsecp256k1_v0_10_0_nonce_function rustsecp256k1_v0_10_0_nonce_function_default = nonce_function_rfc6979; -static int rustsecp256k1_v0_9_2_ecdsa_sign_inner(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_scalar* r, rustsecp256k1_v0_9_2_scalar* s, int* recid, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_9_2_nonce_function noncefp, const void* noncedata) { - rustsecp256k1_v0_9_2_scalar sec, non, msg; +static int rustsecp256k1_v0_10_0_ecdsa_sign_inner(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_scalar* r, rustsecp256k1_v0_10_0_scalar* s, int* recid, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_10_0_nonce_function noncefp, const void* noncedata) { + rustsecp256k1_v0_10_0_scalar sec, non, msg; int ret = 0; int is_sec_valid; unsigned char nonce32[32]; unsigned int count = 0; /* Default initialization here is important so we won't pass uninit values to the cmov in the end */ - *r = rustsecp256k1_v0_9_2_scalar_zero; - *s = rustsecp256k1_v0_9_2_scalar_zero; + *r = rustsecp256k1_v0_10_0_scalar_zero; + *s = rustsecp256k1_v0_10_0_scalar_zero; if (recid) { *recid = 0; } if (noncefp == NULL) { - noncefp = rustsecp256k1_v0_9_2_nonce_function_default; + noncefp = 
rustsecp256k1_v0_10_0_nonce_function_default; } /* Fail if the secret key is invalid. */ - is_sec_valid = rustsecp256k1_v0_9_2_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_9_2_scalar_cmov(&sec, &rustsecp256k1_v0_9_2_scalar_one, !is_sec_valid); - rustsecp256k1_v0_9_2_scalar_set_b32(&msg, msg32, NULL); + is_sec_valid = rustsecp256k1_v0_10_0_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_10_0_scalar_cmov(&sec, &rustsecp256k1_v0_10_0_scalar_one, !is_sec_valid); + rustsecp256k1_v0_10_0_scalar_set_b32(&msg, msg32, NULL); while (1) { int is_nonce_valid; ret = !!noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count); if (!ret) { break; } - is_nonce_valid = rustsecp256k1_v0_9_2_scalar_set_b32_seckey(&non, nonce32); + is_nonce_valid = rustsecp256k1_v0_10_0_scalar_set_b32_seckey(&non, nonce32); /* The nonce is still secret here, but it being invalid is less likely than 1:2^255. */ - rustsecp256k1_v0_9_2_declassify(ctx, &is_nonce_valid, sizeof(is_nonce_valid)); + rustsecp256k1_v0_10_0_declassify(ctx, &is_nonce_valid, sizeof(is_nonce_valid)); if (is_nonce_valid) { - ret = rustsecp256k1_v0_9_2_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, r, s, &sec, &msg, &non, recid); + ret = rustsecp256k1_v0_10_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, r, s, &sec, &msg, &non, recid); /* The final signature is no longer a secret, nor is the fact that we were successful or not. */ - rustsecp256k1_v0_9_2_declassify(ctx, &ret, sizeof(ret)); + rustsecp256k1_v0_10_0_declassify(ctx, &ret, sizeof(ret)); if (ret) { break; } @@ -497,202 +497,202 @@ static int rustsecp256k1_v0_9_2_ecdsa_sign_inner(const rustsecp256k1_v0_9_2_cont * used as a branching variable. */ ret &= is_sec_valid; memset(nonce32, 0, 32); - rustsecp256k1_v0_9_2_scalar_clear(&msg); - rustsecp256k1_v0_9_2_scalar_clear(&non); - rustsecp256k1_v0_9_2_scalar_clear(&sec); - rustsecp256k1_v0_9_2_scalar_cmov(r, &rustsecp256k1_v0_9_2_scalar_zero, !ret); - rustsecp256k1_v0_9_2_scalar_cmov(s, &rustsecp256k1_v0_9_2_scalar_zero, !ret); + rustsecp256k1_v0_10_0_scalar_clear(&msg); + rustsecp256k1_v0_10_0_scalar_clear(&non); + rustsecp256k1_v0_10_0_scalar_clear(&sec); + rustsecp256k1_v0_10_0_scalar_cmov(r, &rustsecp256k1_v0_10_0_scalar_zero, !ret); + rustsecp256k1_v0_10_0_scalar_cmov(s, &rustsecp256k1_v0_10_0_scalar_zero, !ret); if (recid) { const int zero = 0; - rustsecp256k1_v0_9_2_int_cmov(recid, &zero, !ret); + rustsecp256k1_v0_10_0_int_cmov(recid, &zero, !ret); } return ret; } -int rustsecp256k1_v0_9_2_ecdsa_sign(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_ecdsa_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_9_2_nonce_function noncefp, const void* noncedata) { - rustsecp256k1_v0_9_2_scalar r, s; +int rustsecp256k1_v0_10_0_ecdsa_sign(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_ecdsa_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_10_0_nonce_function noncefp, const void* noncedata) { + rustsecp256k1_v0_10_0_scalar r, s; int ret; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_9_2_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_10_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(msghash32 != NULL); ARG_CHECK(signature != NULL); ARG_CHECK(seckey != NULL); - ret =
rustsecp256k1_v0_10_0_ecdsa_sign_inner(ctx, &r, &s, NULL, msghash32, seckey, noncefp, noncedata); + rustsecp256k1_v0_10_0_ecdsa_signature_save(signature, &r, &s); return ret; } -int rustsecp256k1_v0_9_2_ec_seckey_verify(const rustsecp256k1_v0_9_2_context* ctx, const unsigned char *seckey) { - rustsecp256k1_v0_9_2_scalar sec; +int rustsecp256k1_v0_10_0_ec_seckey_verify(const rustsecp256k1_v0_10_0_context* ctx, const unsigned char *seckey) { + rustsecp256k1_v0_10_0_scalar sec; int ret; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_9_2_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_9_2_scalar_clear(&sec); + ret = rustsecp256k1_v0_10_0_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_10_0_scalar_clear(&sec); return ret; } -static int rustsecp256k1_v0_9_2_ec_pubkey_create_helper(const rustsecp256k1_v0_9_2_ecmult_gen_context *ecmult_gen_ctx, rustsecp256k1_v0_9_2_scalar *seckey_scalar, rustsecp256k1_v0_9_2_ge *p, const unsigned char *seckey) { - rustsecp256k1_v0_9_2_gej pj; +static int rustsecp256k1_v0_10_0_ec_pubkey_create_helper(const rustsecp256k1_v0_10_0_ecmult_gen_context *ecmult_gen_ctx, rustsecp256k1_v0_10_0_scalar *seckey_scalar, rustsecp256k1_v0_10_0_ge *p, const unsigned char *seckey) { + rustsecp256k1_v0_10_0_gej pj; int ret; - ret = rustsecp256k1_v0_9_2_scalar_set_b32_seckey(seckey_scalar, seckey); - rustsecp256k1_v0_9_2_scalar_cmov(seckey_scalar, &rustsecp256k1_v0_9_2_scalar_one, !ret); + ret = rustsecp256k1_v0_10_0_scalar_set_b32_seckey(seckey_scalar, seckey); + rustsecp256k1_v0_10_0_scalar_cmov(seckey_scalar, &rustsecp256k1_v0_10_0_scalar_one, !ret); - rustsecp256k1_v0_9_2_ecmult_gen(ecmult_gen_ctx, &pj, seckey_scalar); - rustsecp256k1_v0_9_2_ge_set_gej(p, &pj); + rustsecp256k1_v0_10_0_ecmult_gen(ecmult_gen_ctx, &pj, seckey_scalar); + rustsecp256k1_v0_10_0_ge_set_gej(p, &pj); return ret; } -int rustsecp256k1_v0_9_2_ec_pubkey_create(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_pubkey *pubkey, const unsigned char *seckey) { - rustsecp256k1_v0_9_2_ge p; - rustsecp256k1_v0_9_2_scalar seckey_scalar; +int rustsecp256k1_v0_10_0_ec_pubkey_create(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_pubkey *pubkey, const unsigned char *seckey) { + rustsecp256k1_v0_10_0_ge p; + rustsecp256k1_v0_10_0_scalar seckey_scalar; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); - ARG_CHECK(rustsecp256k1_v0_9_2_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_10_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_9_2_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey); - rustsecp256k1_v0_9_2_pubkey_save(pubkey, &p); - rustsecp256k1_v0_9_2_memczero(pubkey, sizeof(*pubkey), !ret); + ret = rustsecp256k1_v0_10_0_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey); + rustsecp256k1_v0_10_0_pubkey_save(pubkey, &p); + rustsecp256k1_v0_10_0_memczero(pubkey, sizeof(*pubkey), !ret); - rustsecp256k1_v0_9_2_scalar_clear(&seckey_scalar); + rustsecp256k1_v0_10_0_scalar_clear(&seckey_scalar); return ret; } -int rustsecp256k1_v0_9_2_ec_seckey_negate(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *seckey) { - rustsecp256k1_v0_9_2_scalar sec; +int rustsecp256k1_v0_10_0_ec_seckey_negate(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *seckey) { + rustsecp256k1_v0_10_0_scalar sec; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); - 
ret = rustsecp256k1_v0_9_2_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_9_2_scalar_cmov(&sec, &rustsecp256k1_v0_9_2_scalar_zero, !ret); - rustsecp256k1_v0_9_2_scalar_negate(&sec, &sec); - rustsecp256k1_v0_9_2_scalar_get_b32(seckey, &sec); + ret = rustsecp256k1_v0_10_0_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_10_0_scalar_cmov(&sec, &rustsecp256k1_v0_10_0_scalar_zero, !ret); + rustsecp256k1_v0_10_0_scalar_negate(&sec, &sec); + rustsecp256k1_v0_10_0_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_9_2_scalar_clear(&sec); + rustsecp256k1_v0_10_0_scalar_clear(&sec); return ret; } -int rustsecp256k1_v0_9_2_ec_privkey_negate(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *seckey) { - return rustsecp256k1_v0_9_2_ec_seckey_negate(ctx, seckey); +int rustsecp256k1_v0_10_0_ec_privkey_negate(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *seckey) { + return rustsecp256k1_v0_10_0_ec_seckey_negate(ctx, seckey); } -int rustsecp256k1_v0_9_2_ec_pubkey_negate(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_pubkey *pubkey) { +int rustsecp256k1_v0_10_0_ec_pubkey_negate(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_pubkey *pubkey) { int ret = 0; - rustsecp256k1_v0_9_2_ge p; + rustsecp256k1_v0_10_0_ge p; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); - ret = rustsecp256k1_v0_9_2_pubkey_load(ctx, &p, pubkey); + ret = rustsecp256k1_v0_10_0_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); if (ret) { - rustsecp256k1_v0_9_2_ge_neg(&p, &p); - rustsecp256k1_v0_9_2_pubkey_save(pubkey, &p); + rustsecp256k1_v0_10_0_ge_neg(&p, &p); + rustsecp256k1_v0_10_0_pubkey_save(pubkey, &p); } return ret; } -static int rustsecp256k1_v0_9_2_ec_seckey_tweak_add_helper(rustsecp256k1_v0_9_2_scalar *sec, const unsigned char *tweak32) { - rustsecp256k1_v0_9_2_scalar term; +static int rustsecp256k1_v0_10_0_ec_seckey_tweak_add_helper(rustsecp256k1_v0_10_0_scalar *sec, const unsigned char *tweak32) { + rustsecp256k1_v0_10_0_scalar term; int overflow = 0; int ret = 0; - rustsecp256k1_v0_9_2_scalar_set_b32(&term, tweak32, &overflow); - ret = (!overflow) & rustsecp256k1_v0_9_2_eckey_privkey_tweak_add(sec, &term); - rustsecp256k1_v0_9_2_scalar_clear(&term); + rustsecp256k1_v0_10_0_scalar_set_b32(&term, tweak32, &overflow); + ret = (!overflow) & rustsecp256k1_v0_10_0_eckey_privkey_tweak_add(sec, &term); + rustsecp256k1_v0_10_0_scalar_clear(&term); return ret; } -int rustsecp256k1_v0_9_2_ec_seckey_tweak_add(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - rustsecp256k1_v0_9_2_scalar sec; +int rustsecp256k1_v0_10_0_ec_seckey_tweak_add(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + rustsecp256k1_v0_10_0_scalar sec; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); ARG_CHECK(tweak32 != NULL); - ret = rustsecp256k1_v0_9_2_scalar_set_b32_seckey(&sec, seckey); - ret &= rustsecp256k1_v0_9_2_ec_seckey_tweak_add_helper(&sec, tweak32); - rustsecp256k1_v0_9_2_scalar_cmov(&sec, &rustsecp256k1_v0_9_2_scalar_zero, !ret); - rustsecp256k1_v0_9_2_scalar_get_b32(seckey, &sec); + ret = rustsecp256k1_v0_10_0_scalar_set_b32_seckey(&sec, seckey); + ret &= rustsecp256k1_v0_10_0_ec_seckey_tweak_add_helper(&sec, tweak32); + rustsecp256k1_v0_10_0_scalar_cmov(&sec, &rustsecp256k1_v0_10_0_scalar_zero, !ret); + rustsecp256k1_v0_10_0_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_9_2_scalar_clear(&sec); + rustsecp256k1_v0_10_0_scalar_clear(&sec); return ret; } 
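`ec_seckey_tweak_add` above computes (seckey + tweak) mod n and, via `scalar_cmov`, overwrites the caller's key with zeros whenever the operation fails (overflowing tweak, invalid key, or a zero result), so no partial secret ever leaks back out. A toy model of that contract with a small hypothetical modulus; it models the behavior, not the constant-time implementation:

    #include <stdint.h>
    #include <stdio.h>

    #define ORDER 13u  /* toy stand-in for the curve order n */

    /* On failure the key is zeroized rather than left holding a partial result. */
    static int toy_tweak_add(uint32_t *sec, uint32_t tweak) {
        uint32_t r = (*sec + tweak) % ORDER;
        int ok = (r != 0);  /* a zero key is invalid */
        *sec = ok ? r : 0;
        return ok;
    }

    int main(void) {
        uint32_t key = 5;
        printf("ret=%d key=%u\n", toy_tweak_add(&key, 3), (unsigned)key);  /* ret=1 key=8 */
        printf("ret=%d key=%u\n", toy_tweak_add(&key, 5), (unsigned)key);  /* ret=0 key=0 */
        return 0;
    }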
-int rustsecp256k1_v0_9_2_ec_privkey_tweak_add(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - return rustsecp256k1_v0_9_2_ec_seckey_tweak_add(ctx, seckey, tweak32); +int rustsecp256k1_v0_10_0_ec_privkey_tweak_add(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + return rustsecp256k1_v0_10_0_ec_seckey_tweak_add(ctx, seckey, tweak32); } -static int rustsecp256k1_v0_9_2_ec_pubkey_tweak_add_helper(rustsecp256k1_v0_9_2_ge *p, const unsigned char *tweak32) { - rustsecp256k1_v0_9_2_scalar term; +static int rustsecp256k1_v0_10_0_ec_pubkey_tweak_add_helper(rustsecp256k1_v0_10_0_ge *p, const unsigned char *tweak32) { + rustsecp256k1_v0_10_0_scalar term; int overflow = 0; - rustsecp256k1_v0_9_2_scalar_set_b32(&term, tweak32, &overflow); - return !overflow && rustsecp256k1_v0_9_2_eckey_pubkey_tweak_add(p, &term); + rustsecp256k1_v0_10_0_scalar_set_b32(&term, tweak32, &overflow); + return !overflow && rustsecp256k1_v0_10_0_eckey_pubkey_tweak_add(p, &term); } -int rustsecp256k1_v0_9_2_ec_pubkey_tweak_add(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_pubkey *pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_9_2_ge p; +int rustsecp256k1_v0_10_0_ec_pubkey_tweak_add(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_pubkey *pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_10_0_ge p; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); ARG_CHECK(tweak32 != NULL); - ret = rustsecp256k1_v0_9_2_pubkey_load(ctx, &p, pubkey); + ret = rustsecp256k1_v0_10_0_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); - ret = ret && rustsecp256k1_v0_9_2_ec_pubkey_tweak_add_helper(&p, tweak32); + ret = ret && rustsecp256k1_v0_10_0_ec_pubkey_tweak_add_helper(&p, tweak32); if (ret) { - rustsecp256k1_v0_9_2_pubkey_save(pubkey, &p); + rustsecp256k1_v0_10_0_pubkey_save(pubkey, &p); } return ret; } -int rustsecp256k1_v0_9_2_ec_seckey_tweak_mul(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - rustsecp256k1_v0_9_2_scalar factor; - rustsecp256k1_v0_9_2_scalar sec; +int rustsecp256k1_v0_10_0_ec_seckey_tweak_mul(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + rustsecp256k1_v0_10_0_scalar factor; + rustsecp256k1_v0_10_0_scalar sec; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); ARG_CHECK(tweak32 != NULL); - rustsecp256k1_v0_9_2_scalar_set_b32(&factor, tweak32, &overflow); - ret = rustsecp256k1_v0_9_2_scalar_set_b32_seckey(&sec, seckey); - ret &= (!overflow) & rustsecp256k1_v0_9_2_eckey_privkey_tweak_mul(&sec, &factor); - rustsecp256k1_v0_9_2_scalar_cmov(&sec, &rustsecp256k1_v0_9_2_scalar_zero, !ret); - rustsecp256k1_v0_9_2_scalar_get_b32(seckey, &sec); + rustsecp256k1_v0_10_0_scalar_set_b32(&factor, tweak32, &overflow); + ret = rustsecp256k1_v0_10_0_scalar_set_b32_seckey(&sec, seckey); + ret &= (!overflow) & rustsecp256k1_v0_10_0_eckey_privkey_tweak_mul(&sec, &factor); + rustsecp256k1_v0_10_0_scalar_cmov(&sec, &rustsecp256k1_v0_10_0_scalar_zero, !ret); + rustsecp256k1_v0_10_0_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_9_2_scalar_clear(&sec); - rustsecp256k1_v0_9_2_scalar_clear(&factor); + rustsecp256k1_v0_10_0_scalar_clear(&sec); + rustsecp256k1_v0_10_0_scalar_clear(&factor); return ret; } -int rustsecp256k1_v0_9_2_ec_privkey_tweak_mul(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *seckey, const unsigned char 
*tweak32) { - return rustsecp256k1_v0_9_2_ec_seckey_tweak_mul(ctx, seckey, tweak32); +int rustsecp256k1_v0_10_0_ec_privkey_tweak_mul(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + return rustsecp256k1_v0_10_0_ec_seckey_tweak_mul(ctx, seckey, tweak32); } -int rustsecp256k1_v0_9_2_ec_pubkey_tweak_mul(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_pubkey *pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_9_2_ge p; - rustsecp256k1_v0_9_2_scalar factor; +int rustsecp256k1_v0_10_0_ec_pubkey_tweak_mul(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_pubkey *pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_10_0_ge p; + rustsecp256k1_v0_10_0_scalar factor; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); ARG_CHECK(tweak32 != NULL); - rustsecp256k1_v0_9_2_scalar_set_b32(&factor, tweak32, &overflow); - ret = !overflow && rustsecp256k1_v0_9_2_pubkey_load(ctx, &p, pubkey); + rustsecp256k1_v0_10_0_scalar_set_b32(&factor, tweak32, &overflow); + ret = !overflow && rustsecp256k1_v0_10_0_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); if (ret) { - if (rustsecp256k1_v0_9_2_eckey_pubkey_tweak_mul(&p, &factor)) { - rustsecp256k1_v0_9_2_pubkey_save(pubkey, &p); + if (rustsecp256k1_v0_10_0_eckey_pubkey_tweak_mul(&p, &factor)) { + rustsecp256k1_v0_10_0_pubkey_save(pubkey, &p); } else { ret = 0; } @@ -701,20 +701,20 @@ int rustsecp256k1_v0_9_2_ec_pubkey_tweak_mul(const rustsecp256k1_v0_9_2_context* return ret; } -int rustsecp256k1_v0_9_2_context_randomize(rustsecp256k1_v0_9_2_context* ctx, const unsigned char *seed32) { +int rustsecp256k1_v0_10_0_context_randomize(rustsecp256k1_v0_10_0_context* ctx, const unsigned char *seed32) { VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_9_2_context_is_proper(ctx)); + ARG_CHECK(rustsecp256k1_v0_10_0_context_is_proper(ctx)); - if (rustsecp256k1_v0_9_2_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { - rustsecp256k1_v0_9_2_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); + if (rustsecp256k1_v0_10_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { + rustsecp256k1_v0_10_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); } return 1; } -int rustsecp256k1_v0_9_2_ec_pubkey_combine(const rustsecp256k1_v0_9_2_context* ctx, rustsecp256k1_v0_9_2_pubkey *pubnonce, const rustsecp256k1_v0_9_2_pubkey * const *pubnonces, size_t n) { +int rustsecp256k1_v0_10_0_ec_pubkey_combine(const rustsecp256k1_v0_10_0_context* ctx, rustsecp256k1_v0_10_0_pubkey *pubnonce, const rustsecp256k1_v0_10_0_pubkey * const *pubnonces, size_t n) { size_t i; - rustsecp256k1_v0_9_2_gej Qj; - rustsecp256k1_v0_9_2_ge Q; + rustsecp256k1_v0_10_0_gej Qj; + rustsecp256k1_v0_10_0_ge Q; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubnonce != NULL); @@ -722,31 +722,31 @@ int rustsecp256k1_v0_9_2_ec_pubkey_combine(const rustsecp256k1_v0_9_2_context* c ARG_CHECK(n >= 1); ARG_CHECK(pubnonces != NULL); - rustsecp256k1_v0_9_2_gej_set_infinity(&Qj); + rustsecp256k1_v0_10_0_gej_set_infinity(&Qj); for (i = 0; i < n; i++) { ARG_CHECK(pubnonces[i] != NULL); - rustsecp256k1_v0_9_2_pubkey_load(ctx, &Q, pubnonces[i]); - rustsecp256k1_v0_9_2_gej_add_ge(&Qj, &Qj, &Q); + rustsecp256k1_v0_10_0_pubkey_load(ctx, &Q, pubnonces[i]); + rustsecp256k1_v0_10_0_gej_add_ge(&Qj, &Qj, &Q); } - if (rustsecp256k1_v0_9_2_gej_is_infinity(&Qj)) { + if (rustsecp256k1_v0_10_0_gej_is_infinity(&Qj)) { return 0; } - rustsecp256k1_v0_9_2_ge_set_gej(&Q, &Qj); - rustsecp256k1_v0_9_2_pubkey_save(pubnonce, &Q); 
+ rustsecp256k1_v0_10_0_ge_set_gej(&Q, &Qj); + rustsecp256k1_v0_10_0_pubkey_save(pubnonce, &Q); return 1; } -int rustsecp256k1_v0_9_2_tagged_sha256(const rustsecp256k1_v0_9_2_context* ctx, unsigned char *hash32, const unsigned char *tag, size_t taglen, const unsigned char *msg, size_t msglen) { - rustsecp256k1_v0_9_2_sha256 sha; +int rustsecp256k1_v0_10_0_tagged_sha256(const rustsecp256k1_v0_10_0_context* ctx, unsigned char *hash32, const unsigned char *tag, size_t taglen, const unsigned char *msg, size_t msglen) { + rustsecp256k1_v0_10_0_sha256 sha; VERIFY_CHECK(ctx != NULL); ARG_CHECK(hash32 != NULL); ARG_CHECK(tag != NULL); ARG_CHECK(msg != NULL); - rustsecp256k1_v0_9_2_sha256_initialize_tagged(&sha, tag, taglen); - rustsecp256k1_v0_9_2_sha256_write(&sha, msg, msglen); - rustsecp256k1_v0_9_2_sha256_finalize(&sha, hash32); + rustsecp256k1_v0_10_0_sha256_initialize_tagged(&sha, tag, taglen); + rustsecp256k1_v0_10_0_sha256_write(&sha, msg, msglen); + rustsecp256k1_v0_10_0_sha256_finalize(&sha, hash32); return 1; } diff --git a/secp256k1-sys/depend/secp256k1/src/selftest.h b/secp256k1-sys/depend/secp256k1/src/selftest.h index 11e2cf28b..621765fe9 100644 --- a/secp256k1-sys/depend/secp256k1/src/selftest.h +++ b/secp256k1-sys/depend/secp256k1/src/selftest.h @@ -11,22 +11,22 @@ #include -static int rustsecp256k1_v0_9_2_selftest_sha256(void) { +static int rustsecp256k1_v0_10_0_selftest_sha256(void) { static const char *input63 = "For this sample, this 63-byte string will be used as input data"; static const unsigned char output32[32] = { 0xf0, 0x8a, 0x78, 0xcb, 0xba, 0xee, 0x08, 0x2b, 0x05, 0x2a, 0xe0, 0x70, 0x8f, 0x32, 0xfa, 0x1e, 0x50, 0xc5, 0xc4, 0x21, 0xaa, 0x77, 0x2b, 0xa5, 0xdb, 0xb4, 0x06, 0xa2, 0xea, 0x6b, 0xe3, 0x42, }; unsigned char out[32]; - rustsecp256k1_v0_9_2_sha256 hasher; - rustsecp256k1_v0_9_2_sha256_initialize(&hasher); - rustsecp256k1_v0_9_2_sha256_write(&hasher, (const unsigned char*)input63, 63); - rustsecp256k1_v0_9_2_sha256_finalize(&hasher, out); - return rustsecp256k1_v0_9_2_memcmp_var(out, output32, 32) == 0; + rustsecp256k1_v0_10_0_sha256 hasher; + rustsecp256k1_v0_10_0_sha256_initialize(&hasher); + rustsecp256k1_v0_10_0_sha256_write(&hasher, (const unsigned char*)input63, 63); + rustsecp256k1_v0_10_0_sha256_finalize(&hasher, out); + return rustsecp256k1_v0_10_0_memcmp_var(out, output32, 32) == 0; } -static int rustsecp256k1_v0_9_2_selftest_passes(void) { - return rustsecp256k1_v0_9_2_selftest_sha256(); +static int rustsecp256k1_v0_10_0_selftest_passes(void) { + return rustsecp256k1_v0_10_0_selftest_sha256(); } #endif /* SECP256K1_SELFTEST_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/testrand.h b/secp256k1-sys/depend/secp256k1/src/testrand.h index 2786b2289..362618028 100644 --- a/secp256k1-sys/depend/secp256k1/src/testrand.h +++ b/secp256k1-sys/depend/secp256k1/src/testrand.h @@ -12,37 +12,37 @@ /* A non-cryptographic RNG used only for test infrastructure. */ /** Seed the pseudorandom number generator for testing. */ -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_testrand_seed(const unsigned char *seed16); +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_testrand_seed(const unsigned char *seed16); /** Generate a pseudorandom number in the range [0..2**32-1]. */ -SECP256K1_INLINE static uint32_t rustsecp256k1_v0_9_2_testrand32(void); +SECP256K1_INLINE static uint32_t rustsecp256k1_v0_10_0_testrand32(void); /** Generate a pseudorandom number in the range [0..2**64-1]. 
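`tagged_sha256` above implements the BIP-340 tagged-hash construction SHA256(SHA256(tag) || SHA256(tag) || msg): prefixing two copies of the hashed tag gives every protocol domain its own effectively independent hash function. A sketch of the layout, where `sha256()` is a hypothetical one-shot helper (not a library API) and MSG_MAX is an assumed caller-chosen bound:

    #include <stddef.h>
    #include <string.h>

    #define MSG_MAX 256  /* assumed bound for this sketch */

    /* Hypothetical one-shot helper; any SHA-256 implementation would do. */
    void sha256(unsigned char out[32], const unsigned char *in, size_t len);

    /* BIP-340 tagged hash: SHA256(SHA256(tag) || SHA256(tag) || msg). */
    void tagged_hash(unsigned char out[32],
                     const unsigned char *tag, size_t taglen,
                     const unsigned char *msg, size_t msglen) {
        unsigned char taghash[32];
        unsigned char buf[64 + MSG_MAX];
        sha256(taghash, tag, taglen);
        memcpy(buf, taghash, 32);       /* first copy of SHA256(tag) */
        memcpy(buf + 32, taghash, 32);  /* second copy: together exactly one 64-byte block */
        memcpy(buf + 64, msg, msglen);  /* msglen <= MSG_MAX assumed */
        sha256(out, buf, 64 + msglen);
    }

The vendored code streams these pieces through one incremental hasher instead of materializing a buffer; the two tag-hash copies fill exactly one SHA-256 block, which lets the tagged midstate be precomputed once per tag.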
*/ -SECP256K1_INLINE static uint64_t rustsecp256k1_v0_9_2_testrand64(void); +SECP256K1_INLINE static uint64_t rustsecp256k1_v0_10_0_testrand64(void); /** Generate a pseudorandom number in the range [0..2**bits-1]. Bits must be 1 or * more. */ -SECP256K1_INLINE static uint64_t rustsecp256k1_v0_9_2_testrand_bits(int bits); +SECP256K1_INLINE static uint64_t rustsecp256k1_v0_10_0_testrand_bits(int bits); /** Generate a pseudorandom number in the range [0..range-1]. */ -static uint32_t rustsecp256k1_v0_9_2_testrand_int(uint32_t range); +static uint32_t rustsecp256k1_v0_10_0_testrand_int(uint32_t range); /** Generate a pseudorandom 32-byte array. */ -static void rustsecp256k1_v0_9_2_testrand256(unsigned char *b32); +static void rustsecp256k1_v0_10_0_testrand256(unsigned char *b32); /** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. */ -static void rustsecp256k1_v0_9_2_testrand256_test(unsigned char *b32); +static void rustsecp256k1_v0_10_0_testrand256_test(unsigned char *b32); /** Generate pseudorandom bytes with long sequences of zero and one bits. */ -static void rustsecp256k1_v0_9_2_testrand_bytes_test(unsigned char *bytes, size_t len); +static void rustsecp256k1_v0_10_0_testrand_bytes_test(unsigned char *bytes, size_t len); /** Flip a single random bit in a byte array */ -static void rustsecp256k1_v0_9_2_testrand_flip(unsigned char *b, size_t len); +static void rustsecp256k1_v0_10_0_testrand_flip(unsigned char *b, size_t len); /** Initialize the test RNG using (hex encoded) array up to 16 bytes, or randomly if hexseed is NULL. */ -static void rustsecp256k1_v0_9_2_testrand_init(const char* hexseed); +static void rustsecp256k1_v0_10_0_testrand_init(const char* hexseed); /** Print final test information. */ -static void rustsecp256k1_v0_9_2_testrand_finish(void); +static void rustsecp256k1_v0_10_0_testrand_finish(void); #endif /* SECP256K1_TESTRAND_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/testrand_impl.h b/secp256k1-sys/depend/secp256k1/src/testrand_impl.h index 7d1773aa2..a75ba04cc 100644 --- a/secp256k1-sys/depend/secp256k1/src/testrand_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/testrand_impl.h @@ -15,24 +15,24 @@ #include "hash.h" #include "util.h" -static uint64_t rustsecp256k1_v0_9_2_test_state[4]; +static uint64_t rustsecp256k1_v0_10_0_test_state[4]; -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_testrand_seed(const unsigned char *seed16) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_testrand_seed(const unsigned char *seed16) { static const unsigned char PREFIX[19] = "secp256k1 test init"; unsigned char out32[32]; - rustsecp256k1_v0_9_2_sha256 hash; + rustsecp256k1_v0_10_0_sha256 hash; int i; /* Use SHA256(PREFIX || seed16) as initial state. 
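* Hashing the seed spreads it across the full 256-bit state and rules out the all-zero state, which Xoshiro256++ can never leave.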
*/ - rustsecp256k1_v0_9_2_sha256_initialize(&hash); - rustsecp256k1_v0_9_2_sha256_write(&hash, PREFIX, sizeof(PREFIX)); - rustsecp256k1_v0_9_2_sha256_write(&hash, seed16, 16); - rustsecp256k1_v0_9_2_sha256_finalize(&hash, out32); + rustsecp256k1_v0_10_0_sha256_initialize(&hash); + rustsecp256k1_v0_10_0_sha256_write(&hash, PREFIX, sizeof(PREFIX)); + rustsecp256k1_v0_10_0_sha256_write(&hash, seed16, 16); + rustsecp256k1_v0_10_0_sha256_finalize(&hash, out32); for (i = 0; i < 4; ++i) { uint64_t s = 0; int j; for (j = 0; j < 8; ++j) s = (s << 8) | out32[8*i + j]; - rustsecp256k1_v0_9_2_test_state[i] = s; + rustsecp256k1_v0_10_0_test_state[i] = s; } } @@ -40,29 +40,29 @@ SECP256K1_INLINE static uint64_t rotl(const uint64_t x, int k) { return (x << k) | (x >> (64 - k)); } -SECP256K1_INLINE static uint64_t rustsecp256k1_v0_9_2_testrand64(void) { +SECP256K1_INLINE static uint64_t rustsecp256k1_v0_10_0_testrand64(void) { /* Test-only Xoshiro256++ RNG. See https://prng.di.unimi.it/ */ - const uint64_t result = rotl(rustsecp256k1_v0_9_2_test_state[0] + rustsecp256k1_v0_9_2_test_state[3], 23) + rustsecp256k1_v0_9_2_test_state[0]; - const uint64_t t = rustsecp256k1_v0_9_2_test_state[1] << 17; - rustsecp256k1_v0_9_2_test_state[2] ^= rustsecp256k1_v0_9_2_test_state[0]; - rustsecp256k1_v0_9_2_test_state[3] ^= rustsecp256k1_v0_9_2_test_state[1]; - rustsecp256k1_v0_9_2_test_state[1] ^= rustsecp256k1_v0_9_2_test_state[2]; - rustsecp256k1_v0_9_2_test_state[0] ^= rustsecp256k1_v0_9_2_test_state[3]; - rustsecp256k1_v0_9_2_test_state[2] ^= t; - rustsecp256k1_v0_9_2_test_state[3] = rotl(rustsecp256k1_v0_9_2_test_state[3], 45); + const uint64_t result = rotl(rustsecp256k1_v0_10_0_test_state[0] + rustsecp256k1_v0_10_0_test_state[3], 23) + rustsecp256k1_v0_10_0_test_state[0]; + const uint64_t t = rustsecp256k1_v0_10_0_test_state[1] << 17; + rustsecp256k1_v0_10_0_test_state[2] ^= rustsecp256k1_v0_10_0_test_state[0]; + rustsecp256k1_v0_10_0_test_state[3] ^= rustsecp256k1_v0_10_0_test_state[1]; + rustsecp256k1_v0_10_0_test_state[1] ^= rustsecp256k1_v0_10_0_test_state[2]; + rustsecp256k1_v0_10_0_test_state[0] ^= rustsecp256k1_v0_10_0_test_state[3]; + rustsecp256k1_v0_10_0_test_state[2] ^= t; + rustsecp256k1_v0_10_0_test_state[3] = rotl(rustsecp256k1_v0_10_0_test_state[3], 45); return result; } -SECP256K1_INLINE static uint64_t rustsecp256k1_v0_9_2_testrand_bits(int bits) { +SECP256K1_INLINE static uint64_t rustsecp256k1_v0_10_0_testrand_bits(int bits) { if (bits == 0) return 0; - return rustsecp256k1_v0_9_2_testrand64() >> (64 - bits); + return rustsecp256k1_v0_10_0_testrand64() >> (64 - bits); } -SECP256K1_INLINE static uint32_t rustsecp256k1_v0_9_2_testrand32(void) { - return rustsecp256k1_v0_9_2_testrand64() >> 32; +SECP256K1_INLINE static uint32_t rustsecp256k1_v0_10_0_testrand32(void) { + return rustsecp256k1_v0_10_0_testrand64() >> 32; } -static uint32_t rustsecp256k1_v0_9_2_testrand_int(uint32_t range) { +static uint32_t rustsecp256k1_v0_10_0_testrand_int(uint32_t range) { uint32_t mask = 0; uint32_t range_copy; /* Reduce range by 1, changing its meaning to "maximum value". */ @@ -76,15 +76,15 @@ static uint32_t rustsecp256k1_v0_9_2_testrand_int(uint32_t range) { } /* Generation loop. 
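* Draw masked 64-bit outputs and reject anything above the maximum; unlike reduction modulo the range, rejection sampling introduces no bias.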
*/ while (1) { - uint32_t val = rustsecp256k1_v0_9_2_testrand64() & mask; + uint32_t val = rustsecp256k1_v0_10_0_testrand64() & mask; if (val <= range) return val; } } -static void rustsecp256k1_v0_9_2_testrand256(unsigned char *b32) { +static void rustsecp256k1_v0_10_0_testrand256(unsigned char *b32) { int i; for (i = 0; i < 4; ++i) { - uint64_t val = rustsecp256k1_v0_9_2_testrand64(); + uint64_t val = rustsecp256k1_v0_10_0_testrand64(); b32[0] = val; b32[1] = val >> 8; b32[2] = val >> 16; @@ -97,14 +97,14 @@ static void rustsecp256k1_v0_9_2_testrand256(unsigned char *b32) { } } -static void rustsecp256k1_v0_9_2_testrand_bytes_test(unsigned char *bytes, size_t len) { +static void rustsecp256k1_v0_10_0_testrand_bytes_test(unsigned char *bytes, size_t len) { size_t bits = 0; memset(bytes, 0, len); while (bits < len * 8) { int now; uint32_t val; - now = 1 + (rustsecp256k1_v0_9_2_testrand_bits(6) * rustsecp256k1_v0_9_2_testrand_bits(5) + 16) / 31; - val = rustsecp256k1_v0_9_2_testrand_bits(1); + now = 1 + (rustsecp256k1_v0_10_0_testrand_bits(6) * rustsecp256k1_v0_10_0_testrand_bits(5) + 16) / 31; + val = rustsecp256k1_v0_10_0_testrand_bits(1); while (now > 0 && bits < len * 8) { bytes[bits / 8] |= val << (bits % 8); now--; @@ -113,15 +113,15 @@ static void rustsecp256k1_v0_9_2_testrand_bytes_test(unsigned char *bytes, size_ } } -static void rustsecp256k1_v0_9_2_testrand256_test(unsigned char *b32) { - rustsecp256k1_v0_9_2_testrand_bytes_test(b32, 32); +static void rustsecp256k1_v0_10_0_testrand256_test(unsigned char *b32) { + rustsecp256k1_v0_10_0_testrand_bytes_test(b32, 32); } -static void rustsecp256k1_v0_9_2_testrand_flip(unsigned char *b, size_t len) { - b[rustsecp256k1_v0_9_2_testrand_int(len)] ^= (1 << rustsecp256k1_v0_9_2_testrand_bits(3)); +static void rustsecp256k1_v0_10_0_testrand_flip(unsigned char *b, size_t len) { + b[rustsecp256k1_v0_10_0_testrand_int(len)] ^= (1 << rustsecp256k1_v0_10_0_testrand_bits(3)); } -static void rustsecp256k1_v0_9_2_testrand_init(const char* hexseed) { +static void rustsecp256k1_v0_10_0_testrand_init(const char* hexseed) { unsigned char seed16[16] = {0}; if (hexseed && strlen(hexseed) != 0) { int pos = 0; @@ -155,12 +155,12 @@ static void rustsecp256k1_v0_9_2_testrand_init(const char* hexseed) { } printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]); - rustsecp256k1_v0_9_2_testrand_seed(seed16); + rustsecp256k1_v0_10_0_testrand_seed(seed16); } -static void rustsecp256k1_v0_9_2_testrand_finish(void) { +static void rustsecp256k1_v0_10_0_testrand_finish(void) { unsigned char run32[32]; - rustsecp256k1_v0_9_2_testrand256(run32); + rustsecp256k1_v0_10_0_testrand256(run32); printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]); } diff --git a/secp256k1-sys/depend/secp256k1/src/tests.c b/secp256k1-sys/depend/secp256k1/src/tests.c index 388f72eb7..1b6e65496 100644 --- a/secp256k1-sys/depend/secp256k1/src/tests.c +++ b/secp256k1-sys/depend/secp256k1/src/tests.c @@ -23,6 +23,7 @@ #include "../include/secp256k1_preallocated.h" #include "testrand_impl.h" #include "checkmem.h" +#include "testutil.h" #include "util.h" #include 
"../contrib/lax_der_parsing.c" @@ -37,8 +38,8 @@ #define CONDITIONAL_TEST(cnt, nam) if (COUNT < (cnt)) { printf("Skipping %s (iteration count too low)\n", nam); } else static int COUNT = 64; -static rustsecp256k1_v0_9_2_context *CTX = NULL; -static rustsecp256k1_v0_9_2_context *STATIC_CTX = NULL; +static rustsecp256k1_v0_10_0_context *CTX = NULL; +static rustsecp256k1_v0_10_0_context *STATIC_CTX = NULL; static int all_bytes_equal(const void* s, unsigned char value, size_t n) { const unsigned char *p = s; @@ -52,26 +53,32 @@ static int all_bytes_equal(const void* s, unsigned char value, size_t n) { return 1; } -/* TODO Use CHECK_ILLEGAL(_VOID) everywhere and get rid of the uncounting callback */ -/* CHECK that expr_or_stmt calls the illegal callback of ctx exactly once - * - * For checking functions that use ARG_CHECK_VOID */ -#define CHECK_ILLEGAL_VOID(ctx, expr_or_stmt) do { \ - int32_t _calls_to_illegal_callback = 0; \ - rustsecp256k1_v0_9_2_callback _saved_illegal_cb = ctx->illegal_callback; \ - rustsecp256k1_v0_9_2_context_set_illegal_callback(ctx, \ - counting_illegal_callback_fn, &_calls_to_illegal_callback); \ +#define CHECK_COUNTING_CALLBACK_VOID(ctx, expr_or_stmt, callback, callback_setter) do { \ + int32_t _calls_to_callback = 0; \ + rustsecp256k1_v0_10_0_callback _saved_callback = ctx->callback; \ + callback_setter(ctx, counting_callback_fn, &_calls_to_callback); \ { expr_or_stmt; } \ - ctx->illegal_callback = _saved_illegal_cb; \ - CHECK(_calls_to_illegal_callback == 1); \ + ctx->callback = _saved_callback; \ + CHECK(_calls_to_callback == 1); \ } while(0); -/* CHECK that expr calls the illegal callback of ctx exactly once and that expr == 0 +/* CHECK that expr_or_stmt calls the error or illegal callback of ctx exactly once + * + * Useful for checking functions that return void (e.g., API functions that use ARG_CHECK_VOID) */ +#define CHECK_ERROR_VOID(ctx, expr_or_stmt) \ + CHECK_COUNTING_CALLBACK_VOID(ctx, expr_or_stmt, error_callback, rustsecp256k1_v0_10_0_context_set_error_callback) +#define CHECK_ILLEGAL_VOID(ctx, expr_or_stmt) \ + CHECK_COUNTING_CALLBACK_VOID(ctx, expr_or_stmt, illegal_callback, rustsecp256k1_v0_10_0_context_set_illegal_callback) + +/* CHECK that + * - expr calls the illegal callback of ctx exactly once and, + * - expr == 0 (or equivalently, expr == NULL) * - * For checking functions that use ARG_CHECK */ + * Useful for checking functions that return an integer or a pointer. */ #define CHECK_ILLEGAL(ctx, expr) CHECK_ILLEGAL_VOID(ctx, CHECK((expr) == 0)) +#define CHECK_ERROR(ctx, expr) CHECK_ERROR_VOID(ctx, CHECK((expr) == 0)) -static void counting_illegal_callback_fn(const char* str, void* data) { +static void counting_callback_fn(const char* str, void* data) { /* Dummy callback function that just counts. 
*/ int32_t *p; (void)str; @@ -89,110 +96,110 @@ static void uncounting_illegal_callback_fn(const char* str, void* data) { (*p)--; } -static void random_field_element_magnitude(rustsecp256k1_v0_9_2_fe *fe, int m) { - rustsecp256k1_v0_9_2_fe zero; - int n = rustsecp256k1_v0_9_2_testrand_int(m + 1); - rustsecp256k1_v0_9_2_fe_normalize(fe); +static void random_field_element_magnitude(rustsecp256k1_v0_10_0_fe *fe, int m) { + rustsecp256k1_v0_10_0_fe zero; + int n = rustsecp256k1_v0_10_0_testrand_int(m + 1); + rustsecp256k1_v0_10_0_fe_normalize(fe); if (n == 0) { return; } - rustsecp256k1_v0_9_2_fe_clear(&zero); - rustsecp256k1_v0_9_2_fe_negate(&zero, &zero, 0); - rustsecp256k1_v0_9_2_fe_mul_int_unchecked(&zero, n - 1); - rustsecp256k1_v0_9_2_fe_add(fe, &zero); + rustsecp256k1_v0_10_0_fe_clear(&zero); + rustsecp256k1_v0_10_0_fe_negate(&zero, &zero, 0); + rustsecp256k1_v0_10_0_fe_mul_int_unchecked(&zero, n - 1); + rustsecp256k1_v0_10_0_fe_add(fe, &zero); #ifdef VERIFY CHECK(fe->magnitude == n); #endif } -static void random_fe_test(rustsecp256k1_v0_9_2_fe *x) { +static void random_fe_test(rustsecp256k1_v0_10_0_fe *x) { unsigned char bin[32]; do { - rustsecp256k1_v0_9_2_testrand256_test(bin); - if (rustsecp256k1_v0_9_2_fe_set_b32_limit(x, bin)) { + rustsecp256k1_v0_10_0_testrand256_test(bin); + if (rustsecp256k1_v0_10_0_fe_set_b32_limit(x, bin)) { return; } } while(1); } -static void random_fe_non_zero_test(rustsecp256k1_v0_9_2_fe *fe) { +static void random_fe_non_zero_test(rustsecp256k1_v0_10_0_fe *fe) { do { random_fe_test(fe); - } while(rustsecp256k1_v0_9_2_fe_is_zero(fe)); + } while(rustsecp256k1_v0_10_0_fe_is_zero(fe)); } -static void random_fe_magnitude(rustsecp256k1_v0_9_2_fe *fe) { +static void random_fe_magnitude(rustsecp256k1_v0_10_0_fe *fe) { random_field_element_magnitude(fe, 8); } -static void random_ge_x_magnitude(rustsecp256k1_v0_9_2_ge *ge) { +static void random_ge_x_magnitude(rustsecp256k1_v0_10_0_ge *ge) { random_field_element_magnitude(&ge->x, SECP256K1_GE_X_MAGNITUDE_MAX); } -static void random_ge_y_magnitude(rustsecp256k1_v0_9_2_ge *ge) { +static void random_ge_y_magnitude(rustsecp256k1_v0_10_0_ge *ge) { random_field_element_magnitude(&ge->y, SECP256K1_GE_Y_MAGNITUDE_MAX); } -static void random_gej_x_magnitude(rustsecp256k1_v0_9_2_gej *gej) { +static void random_gej_x_magnitude(rustsecp256k1_v0_10_0_gej *gej) { random_field_element_magnitude(&gej->x, SECP256K1_GEJ_X_MAGNITUDE_MAX); } -static void random_gej_y_magnitude(rustsecp256k1_v0_9_2_gej *gej) { +static void random_gej_y_magnitude(rustsecp256k1_v0_10_0_gej *gej) { random_field_element_magnitude(&gej->y, SECP256K1_GEJ_Y_MAGNITUDE_MAX); } -static void random_gej_z_magnitude(rustsecp256k1_v0_9_2_gej *gej) { +static void random_gej_z_magnitude(rustsecp256k1_v0_10_0_gej *gej) { random_field_element_magnitude(&gej->z, SECP256K1_GEJ_Z_MAGNITUDE_MAX); } -static void random_group_element_test(rustsecp256k1_v0_9_2_ge *ge) { - rustsecp256k1_v0_9_2_fe fe; +static void random_group_element_test(rustsecp256k1_v0_10_0_ge *ge) { + rustsecp256k1_v0_10_0_fe fe; do { random_fe_test(&fe); - if (rustsecp256k1_v0_9_2_ge_set_xo_var(ge, &fe, rustsecp256k1_v0_9_2_testrand_bits(1))) { - rustsecp256k1_v0_9_2_fe_normalize(&ge->y); + if (rustsecp256k1_v0_10_0_ge_set_xo_var(ge, &fe, rustsecp256k1_v0_10_0_testrand_bits(1))) { + rustsecp256k1_v0_10_0_fe_normalize(&ge->y); break; } } while(1); ge->infinity = 0; } -static void random_group_element_jacobian_test(rustsecp256k1_v0_9_2_gej *gej, const rustsecp256k1_v0_9_2_ge *ge) { - rustsecp256k1_v0_9_2_fe 
z2, z3; +static void random_group_element_jacobian_test(rustsecp256k1_v0_10_0_gej *gej, const rustsecp256k1_v0_10_0_ge *ge) { + rustsecp256k1_v0_10_0_fe z2, z3; random_fe_non_zero_test(&gej->z); - rustsecp256k1_v0_9_2_fe_sqr(&z2, &gej->z); - rustsecp256k1_v0_9_2_fe_mul(&z3, &z2, &gej->z); - rustsecp256k1_v0_9_2_fe_mul(&gej->x, &ge->x, &z2); - rustsecp256k1_v0_9_2_fe_mul(&gej->y, &ge->y, &z3); + rustsecp256k1_v0_10_0_fe_sqr(&z2, &gej->z); + rustsecp256k1_v0_10_0_fe_mul(&z3, &z2, &gej->z); + rustsecp256k1_v0_10_0_fe_mul(&gej->x, &ge->x, &z2); + rustsecp256k1_v0_10_0_fe_mul(&gej->y, &ge->y, &z3); gej->infinity = ge->infinity; } -static void random_gej_test(rustsecp256k1_v0_9_2_gej *gej) { - rustsecp256k1_v0_9_2_ge ge; +static void random_gej_test(rustsecp256k1_v0_10_0_gej *gej) { + rustsecp256k1_v0_10_0_ge ge; random_group_element_test(&ge); random_group_element_jacobian_test(gej, &ge); } -static void random_scalar_order_test(rustsecp256k1_v0_9_2_scalar *num) { +static void random_scalar_order_test(rustsecp256k1_v0_10_0_scalar *num) { do { unsigned char b32[32]; int overflow = 0; - rustsecp256k1_v0_9_2_testrand256_test(b32); - rustsecp256k1_v0_9_2_scalar_set_b32(num, b32, &overflow); - if (overflow || rustsecp256k1_v0_9_2_scalar_is_zero(num)) { + rustsecp256k1_v0_10_0_testrand256_test(b32); + rustsecp256k1_v0_10_0_scalar_set_b32(num, b32, &overflow); + if (overflow || rustsecp256k1_v0_10_0_scalar_is_zero(num)) { continue; } break; } while(1); } -static void random_scalar_order(rustsecp256k1_v0_9_2_scalar *num) { +static void random_scalar_order(rustsecp256k1_v0_10_0_scalar *num) { do { unsigned char b32[32]; int overflow = 0; - rustsecp256k1_v0_9_2_testrand256(b32); - rustsecp256k1_v0_9_2_scalar_set_b32(num, b32, &overflow); - if (overflow || rustsecp256k1_v0_9_2_scalar_is_zero(num)) { + rustsecp256k1_v0_10_0_testrand256(b32); + rustsecp256k1_v0_10_0_scalar_set_b32(num, b32, &overflow); + if (overflow || rustsecp256k1_v0_10_0_scalar_is_zero(num)) { continue; } break; @@ -200,17 +207,17 @@ static void random_scalar_order(rustsecp256k1_v0_9_2_scalar *num) { } static void random_scalar_order_b32(unsigned char *b32) { - rustsecp256k1_v0_9_2_scalar num; + rustsecp256k1_v0_10_0_scalar num; random_scalar_order(&num); - rustsecp256k1_v0_9_2_scalar_get_b32(b32, &num); + rustsecp256k1_v0_10_0_scalar_get_b32(b32, &num); } static void run_xoshiro256pp_tests(void) { { size_t i; /* Sanity check that we run before the actual seeding. 
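* The state array has static storage duration and is therefore zero-initialized; a nonzero limb here would mean the RNG was already used.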
*/ - for (i = 0; i < sizeof(rustsecp256k1_v0_9_2_test_state)/sizeof(rustsecp256k1_v0_9_2_test_state[0]); i++) { - CHECK(rustsecp256k1_v0_9_2_test_state[i] == 0); + for (i = 0; i < sizeof(rustsecp256k1_v0_10_0_test_state)/sizeof(rustsecp256k1_v0_10_0_test_state[0]); i++) { + CHECK(rustsecp256k1_v0_10_0_test_state[i] == 0); } } { @@ -226,26 +233,26 @@ static void run_xoshiro256pp_tests(void) { 0x4C, 0xCC, 0xC1, 0x18, 0xB2, 0xD8, 0x8F, 0xEF, 0x43, 0x26, 0x15, 0x57, 0x37, 0x00, 0xEF, 0x30, }; - rustsecp256k1_v0_9_2_testrand_seed(seed16); + rustsecp256k1_v0_10_0_testrand_seed(seed16); for (i = 0; i < 17; i++) { - rustsecp256k1_v0_9_2_testrand256(buf32); + rustsecp256k1_v0_10_0_testrand256(buf32); } - CHECK(rustsecp256k1_v0_9_2_memcmp_var(buf32, buf32_expected, sizeof(buf32)) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(buf32, buf32_expected, sizeof(buf32)) == 0); } } static void run_selftest_tests(void) { /* Test public API */ - rustsecp256k1_v0_9_2_selftest(); + rustsecp256k1_v0_10_0_selftest(); } -static int ecmult_gen_context_eq(const rustsecp256k1_v0_9_2_ecmult_gen_context *a, const rustsecp256k1_v0_9_2_ecmult_gen_context *b) { +static int ecmult_gen_context_eq(const rustsecp256k1_v0_10_0_ecmult_gen_context *a, const rustsecp256k1_v0_10_0_ecmult_gen_context *b) { return a->built == b->built - && rustsecp256k1_v0_9_2_scalar_eq(&a->blind, &b->blind) - && rustsecp256k1_v0_9_2_gej_eq_var(&a->initial, &b->initial); + && rustsecp256k1_v0_10_0_scalar_eq(&a->blind, &b->blind) + && rustsecp256k1_v0_10_0_gej_eq_var(&a->initial, &b->initial); } -static int context_eq(const rustsecp256k1_v0_9_2_context *a, const rustsecp256k1_v0_9_2_context *b) { +static int context_eq(const rustsecp256k1_v0_10_0_context *a, const rustsecp256k1_v0_10_0_context *b) { return a->declassify == b->declassify && ecmult_gen_context_eq(&a->ecmult_gen_ctx, &b->ecmult_gen_ctx) && a->illegal_callback.fn == b->illegal_callback.fn @@ -260,315 +267,280 @@ static void run_deprecated_context_flags_test(void) { unsigned int flags[] = { SECP256K1_CONTEXT_SIGN, SECP256K1_CONTEXT_VERIFY, SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY }; - rustsecp256k1_v0_9_2_context *none_ctx = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE); + rustsecp256k1_v0_10_0_context *none_ctx = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE); int i; for (i = 0; i < (int)(sizeof(flags)/sizeof(flags[0])); i++) { - rustsecp256k1_v0_9_2_context *tmp_ctx; - CHECK(rustsecp256k1_v0_9_2_context_preallocated_size(SECP256K1_CONTEXT_NONE) == rustsecp256k1_v0_9_2_context_preallocated_size(flags[i])); - tmp_ctx = rustsecp256k1_v0_9_2_context_create(flags[i]); + rustsecp256k1_v0_10_0_context *tmp_ctx; + CHECK(rustsecp256k1_v0_10_0_context_preallocated_size(SECP256K1_CONTEXT_NONE) == rustsecp256k1_v0_10_0_context_preallocated_size(flags[i])); + tmp_ctx = rustsecp256k1_v0_10_0_context_create(flags[i]); CHECK(context_eq(none_ctx, tmp_ctx)); - rustsecp256k1_v0_9_2_context_destroy(tmp_ctx); + rustsecp256k1_v0_10_0_context_destroy(tmp_ctx); } - rustsecp256k1_v0_9_2_context_destroy(none_ctx); + rustsecp256k1_v0_10_0_context_destroy(none_ctx); } static void run_ec_illegal_argument_tests(void) { - int ecount = 0; - int ecount2 = 10; - rustsecp256k1_v0_9_2_pubkey pubkey; - rustsecp256k1_v0_9_2_pubkey zero_pubkey; - rustsecp256k1_v0_9_2_ecdsa_signature sig; + rustsecp256k1_v0_10_0_pubkey pubkey; + rustsecp256k1_v0_10_0_pubkey zero_pubkey; + rustsecp256k1_v0_10_0_ecdsa_signature sig; unsigned char ctmp[32]; /* Setup */ - 
rustsecp256k1_v0_9_2_context_set_illegal_callback(STATIC_CTX, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, counting_illegal_callback_fn, &ecount2); memset(ctmp, 1, 32); memset(&zero_pubkey, 0, sizeof(zero_pubkey)); /* Verify context-type checking illegal-argument errors. */ - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(STATIC_CTX, &pubkey, ctmp) == 0); - CHECK(ecount == 1); + CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_10_0_ec_pubkey_create(STATIC_CTX, &pubkey, ctmp)); SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey, ctmp) == 1); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(STATIC_CTX, &sig, ctmp, ctmp, NULL, NULL) == 0); - CHECK(ecount == 2); + CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_10_0_ecdsa_sign(STATIC_CTX, &sig, ctmp, ctmp, NULL, NULL)); SECP256K1_CHECKMEM_UNDEFINE(&sig, sizeof(sig)); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig, ctmp, ctmp, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig, ctmp, ctmp, NULL, NULL) == 1); SECP256K1_CHECKMEM_CHECK(&sig, sizeof(sig)); - CHECK(ecount2 == 10); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &sig, ctmp, &pubkey) == 1); - CHECK(ecount2 == 10); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(STATIC_CTX, &sig, ctmp, &pubkey) == 1); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_add(CTX, &pubkey, ctmp) == 1); - CHECK(ecount2 == 10); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_add(STATIC_CTX, &pubkey, ctmp) == 1); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_mul(CTX, &pubkey, ctmp) == 1); - CHECK(ecount2 == 10); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_negate(STATIC_CTX, &pubkey) == 1); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_negate(CTX, &pubkey) == 1); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_negate(STATIC_CTX, &zero_pubkey) == 0); - CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_negate(CTX, NULL) == 0); - CHECK(ecount2 == 11); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_mul(STATIC_CTX, &pubkey, ctmp) == 1); - CHECK(ecount == 3); - - /* Clean up */ - rustsecp256k1_v0_9_2_context_set_illegal_callback(STATIC_CTX, NULL, NULL); - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, NULL, NULL); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &sig, ctmp, &pubkey) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(STATIC_CTX, &sig, ctmp, &pubkey) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_add(CTX, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_add(STATIC_CTX, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_mul(CTX, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_negate(STATIC_CTX, &pubkey) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_negate(CTX, &pubkey) == 1); + CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_10_0_ec_pubkey_negate(STATIC_CTX, &zero_pubkey)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_negate(CTX, NULL)); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_mul(STATIC_CTX, &pubkey, ctmp) == 1); } static void run_static_context_tests(int use_prealloc) { - /* Check that deprecated rustsecp256k1_v0_9_2_context_no_precomp is an alias to rustsecp256k1_v0_9_2_context_static. 
*/ - CHECK(rustsecp256k1_v0_9_2_context_no_precomp == rustsecp256k1_v0_9_2_context_static); + /* Check that deprecated rustsecp256k1_v0_10_0_context_no_precomp is an alias to rustsecp256k1_v0_10_0_context_static. */ + CHECK(rustsecp256k1_v0_10_0_context_no_precomp == rustsecp256k1_v0_10_0_context_static); { unsigned char seed[32] = {0x17}; - /* Randomizing rustsecp256k1_v0_9_2_context_static is not supported. */ - CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_9_2_context_randomize(STATIC_CTX, seed)); - CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_9_2_context_randomize(STATIC_CTX, NULL)); + /* Randomizing rustsecp256k1_v0_10_0_context_static is not supported. */ + CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_10_0_context_randomize(STATIC_CTX, seed)); + CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_10_0_context_randomize(STATIC_CTX, NULL)); - /* Destroying or cloning rustsecp256k1_v0_9_2_context_static is not supported. */ + /* Destroying or cloning rustsecp256k1_v0_10_0_context_static is not supported. */ if (use_prealloc) { - CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_9_2_context_preallocated_clone_size(STATIC_CTX)); + CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_10_0_context_preallocated_clone_size(STATIC_CTX)); { - rustsecp256k1_v0_9_2_context *my_static_ctx = malloc(sizeof(*STATIC_CTX)); + rustsecp256k1_v0_10_0_context *my_static_ctx = malloc(sizeof(*STATIC_CTX)); CHECK(my_static_ctx != NULL); memset(my_static_ctx, 0x2a, sizeof(*my_static_ctx)); - CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_9_2_context_preallocated_clone(STATIC_CTX, my_static_ctx)); + CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_10_0_context_preallocated_clone(STATIC_CTX, my_static_ctx)); CHECK(all_bytes_equal(my_static_ctx, 0x2a, sizeof(*my_static_ctx))); free(my_static_ctx); } - CHECK_ILLEGAL_VOID(STATIC_CTX, rustsecp256k1_v0_9_2_context_preallocated_destroy(STATIC_CTX)); + CHECK_ILLEGAL_VOID(STATIC_CTX, rustsecp256k1_v0_10_0_context_preallocated_destroy(STATIC_CTX)); } else { - CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_9_2_context_clone(STATIC_CTX)); - CHECK_ILLEGAL_VOID(STATIC_CTX, rustsecp256k1_v0_9_2_context_destroy(STATIC_CTX)); + CHECK_ILLEGAL(STATIC_CTX, rustsecp256k1_v0_10_0_context_clone(STATIC_CTX)); + CHECK_ILLEGAL_VOID(STATIC_CTX, rustsecp256k1_v0_10_0_context_destroy(STATIC_CTX)); } } { /* Verify that setting and resetting illegal callback works */ int32_t dummy = 0; - rustsecp256k1_v0_9_2_context_set_illegal_callback(STATIC_CTX, counting_illegal_callback_fn, &dummy); - CHECK(STATIC_CTX->illegal_callback.fn == counting_illegal_callback_fn); + rustsecp256k1_v0_10_0_context_set_illegal_callback(STATIC_CTX, counting_callback_fn, &dummy); + CHECK(STATIC_CTX->illegal_callback.fn == counting_callback_fn); CHECK(STATIC_CTX->illegal_callback.data == &dummy); - rustsecp256k1_v0_9_2_context_set_illegal_callback(STATIC_CTX, NULL, NULL); - CHECK(STATIC_CTX->illegal_callback.fn == rustsecp256k1_v0_9_2_default_illegal_callback_fn); + rustsecp256k1_v0_10_0_context_set_illegal_callback(STATIC_CTX, NULL, NULL); + CHECK(STATIC_CTX->illegal_callback.fn == rustsecp256k1_v0_10_0_default_illegal_callback_fn); CHECK(STATIC_CTX->illegal_callback.data == NULL); } } static void run_proper_context_tests(int use_prealloc) { int32_t dummy = 0; - rustsecp256k1_v0_9_2_context *my_ctx, *my_ctx_fresh; + rustsecp256k1_v0_10_0_context *my_ctx, *my_ctx_fresh; void *my_ctx_prealloc = NULL; unsigned char seed[32] = {0x17}; - rustsecp256k1_v0_9_2_gej pubj; - rustsecp256k1_v0_9_2_ge pub; - rustsecp256k1_v0_9_2_scalar msg, key, nonce; - 
rustsecp256k1_v0_9_2_scalar sigr, sigs; + rustsecp256k1_v0_10_0_gej pubj; + rustsecp256k1_v0_10_0_ge pub; + rustsecp256k1_v0_10_0_scalar msg, key, nonce; + rustsecp256k1_v0_10_0_scalar sigr, sigs; /* Fresh reference context for comparison */ - my_ctx_fresh = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE); + my_ctx_fresh = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE); if (use_prealloc) { - my_ctx_prealloc = malloc(rustsecp256k1_v0_9_2_context_preallocated_size(SECP256K1_CONTEXT_NONE)); + my_ctx_prealloc = malloc(rustsecp256k1_v0_10_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(my_ctx_prealloc != NULL); - my_ctx = rustsecp256k1_v0_9_2_context_preallocated_create(my_ctx_prealloc, SECP256K1_CONTEXT_NONE); + my_ctx = rustsecp256k1_v0_10_0_context_preallocated_create(my_ctx_prealloc, SECP256K1_CONTEXT_NONE); } else { - my_ctx = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE); + my_ctx = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE); } /* Randomize and reset randomization */ CHECK(context_eq(my_ctx, my_ctx_fresh)); - CHECK(rustsecp256k1_v0_9_2_context_randomize(my_ctx, seed) == 1); + CHECK(rustsecp256k1_v0_10_0_context_randomize(my_ctx, seed) == 1); CHECK(!context_eq(my_ctx, my_ctx_fresh)); - CHECK(rustsecp256k1_v0_9_2_context_randomize(my_ctx, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_context_randomize(my_ctx, NULL) == 1); CHECK(context_eq(my_ctx, my_ctx_fresh)); - /* set error callback (to a function that still aborts in case malloc() fails in rustsecp256k1_v0_9_2_context_clone() below) */ - rustsecp256k1_v0_9_2_context_set_error_callback(my_ctx, rustsecp256k1_v0_9_2_default_illegal_callback_fn, NULL); - CHECK(my_ctx->error_callback.fn != rustsecp256k1_v0_9_2_default_error_callback_fn); - CHECK(my_ctx->error_callback.fn == rustsecp256k1_v0_9_2_default_illegal_callback_fn); + /* set error callback (to a function that still aborts in case malloc() fails in rustsecp256k1_v0_10_0_context_clone() below) */ + rustsecp256k1_v0_10_0_context_set_error_callback(my_ctx, rustsecp256k1_v0_10_0_default_illegal_callback_fn, NULL); + CHECK(my_ctx->error_callback.fn != rustsecp256k1_v0_10_0_default_error_callback_fn); + CHECK(my_ctx->error_callback.fn == rustsecp256k1_v0_10_0_default_illegal_callback_fn); /* check if sizes for cloning are consistent */ - CHECK(rustsecp256k1_v0_9_2_context_preallocated_clone_size(my_ctx) == rustsecp256k1_v0_9_2_context_preallocated_size(SECP256K1_CONTEXT_NONE)); + CHECK(rustsecp256k1_v0_10_0_context_preallocated_clone_size(my_ctx) == rustsecp256k1_v0_10_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); /*** clone and destroy all of them to make sure cloning was complete ***/ { - rustsecp256k1_v0_9_2_context *ctx_tmp; + rustsecp256k1_v0_10_0_context *ctx_tmp; if (use_prealloc) { /* clone into a non-preallocated context and then again into a new preallocated one. 
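* After each hop, context_eq compares the contexts field by field, so a clone that failed to copy something is caught.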
*/ ctx_tmp = my_ctx; - my_ctx = rustsecp256k1_v0_9_2_context_clone(my_ctx); + my_ctx = rustsecp256k1_v0_10_0_context_clone(my_ctx); CHECK(context_eq(ctx_tmp, my_ctx)); - rustsecp256k1_v0_9_2_context_preallocated_destroy(ctx_tmp); + rustsecp256k1_v0_10_0_context_preallocated_destroy(ctx_tmp); free(my_ctx_prealloc); - my_ctx_prealloc = malloc(rustsecp256k1_v0_9_2_context_preallocated_size(SECP256K1_CONTEXT_NONE)); + my_ctx_prealloc = malloc(rustsecp256k1_v0_10_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(my_ctx_prealloc != NULL); ctx_tmp = my_ctx; - my_ctx = rustsecp256k1_v0_9_2_context_preallocated_clone(my_ctx, my_ctx_prealloc); + my_ctx = rustsecp256k1_v0_10_0_context_preallocated_clone(my_ctx, my_ctx_prealloc); CHECK(context_eq(ctx_tmp, my_ctx)); - rustsecp256k1_v0_9_2_context_destroy(ctx_tmp); + rustsecp256k1_v0_10_0_context_destroy(ctx_tmp); } else { /* clone into a preallocated context and then again into a new non-preallocated one. */ void *prealloc_tmp; - prealloc_tmp = malloc(rustsecp256k1_v0_9_2_context_preallocated_size(SECP256K1_CONTEXT_NONE)); + prealloc_tmp = malloc(rustsecp256k1_v0_10_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(prealloc_tmp != NULL); ctx_tmp = my_ctx; - my_ctx = rustsecp256k1_v0_9_2_context_preallocated_clone(my_ctx, prealloc_tmp); + my_ctx = rustsecp256k1_v0_10_0_context_preallocated_clone(my_ctx, prealloc_tmp); CHECK(context_eq(ctx_tmp, my_ctx)); - rustsecp256k1_v0_9_2_context_destroy(ctx_tmp); + rustsecp256k1_v0_10_0_context_destroy(ctx_tmp); ctx_tmp = my_ctx; - my_ctx = rustsecp256k1_v0_9_2_context_clone(my_ctx); + my_ctx = rustsecp256k1_v0_10_0_context_clone(my_ctx); CHECK(context_eq(ctx_tmp, my_ctx)); - rustsecp256k1_v0_9_2_context_preallocated_destroy(ctx_tmp); + rustsecp256k1_v0_10_0_context_preallocated_destroy(ctx_tmp); free(prealloc_tmp); } } /* Verify that the error callback makes it across the clone. */ - CHECK(my_ctx->error_callback.fn != rustsecp256k1_v0_9_2_default_error_callback_fn); - CHECK(my_ctx->error_callback.fn == rustsecp256k1_v0_9_2_default_illegal_callback_fn); + CHECK(my_ctx->error_callback.fn != rustsecp256k1_v0_10_0_default_error_callback_fn); + CHECK(my_ctx->error_callback.fn == rustsecp256k1_v0_10_0_default_illegal_callback_fn); /* And that it resets back to default. 
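* Passing NULL for both the function and the data pointer restores the built-in handler.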
*/ - rustsecp256k1_v0_9_2_context_set_error_callback(my_ctx, NULL, NULL); - CHECK(my_ctx->error_callback.fn == rustsecp256k1_v0_9_2_default_error_callback_fn); + rustsecp256k1_v0_10_0_context_set_error_callback(my_ctx, NULL, NULL); + CHECK(my_ctx->error_callback.fn == rustsecp256k1_v0_10_0_default_error_callback_fn); CHECK(context_eq(my_ctx, my_ctx_fresh)); /* Verify that setting and resetting illegal callback works */ - rustsecp256k1_v0_9_2_context_set_illegal_callback(my_ctx, counting_illegal_callback_fn, &dummy); - CHECK(my_ctx->illegal_callback.fn == counting_illegal_callback_fn); + rustsecp256k1_v0_10_0_context_set_illegal_callback(my_ctx, counting_callback_fn, &dummy); + CHECK(my_ctx->illegal_callback.fn == counting_callback_fn); CHECK(my_ctx->illegal_callback.data == &dummy); - rustsecp256k1_v0_9_2_context_set_illegal_callback(my_ctx, NULL, NULL); - CHECK(my_ctx->illegal_callback.fn == rustsecp256k1_v0_9_2_default_illegal_callback_fn); + rustsecp256k1_v0_10_0_context_set_illegal_callback(my_ctx, NULL, NULL); + CHECK(my_ctx->illegal_callback.fn == rustsecp256k1_v0_10_0_default_illegal_callback_fn); CHECK(my_ctx->illegal_callback.data == NULL); CHECK(context_eq(my_ctx, my_ctx_fresh)); /*** attempt to use them ***/ random_scalar_order_test(&msg); random_scalar_order_test(&key); - rustsecp256k1_v0_9_2_ecmult_gen(&my_ctx->ecmult_gen_ctx, &pubj, &key); - rustsecp256k1_v0_9_2_ge_set_gej(&pub, &pubj); + rustsecp256k1_v0_10_0_ecmult_gen(&my_ctx->ecmult_gen_ctx, &pubj, &key); + rustsecp256k1_v0_10_0_ge_set_gej(&pub, &pubj); /* obtain a working nonce */ do { random_scalar_order_test(&nonce); - } while(!rustsecp256k1_v0_9_2_ecdsa_sig_sign(&my_ctx->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); + } while(!rustsecp256k1_v0_10_0_ecdsa_sig_sign(&my_ctx->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); /* try signing */ - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_sign(&my_ctx->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_sign(&my_ctx->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); /* try verifying */ - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sigr, &sigs, &pub, &msg)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sigr, &sigs, &pub, &msg)); /* cleanup */ if (use_prealloc) { - rustsecp256k1_v0_9_2_context_preallocated_destroy(my_ctx); + rustsecp256k1_v0_10_0_context_preallocated_destroy(my_ctx); free(my_ctx_prealloc); } else { - rustsecp256k1_v0_9_2_context_destroy(my_ctx); + rustsecp256k1_v0_10_0_context_destroy(my_ctx); } - rustsecp256k1_v0_9_2_context_destroy(my_ctx_fresh); + rustsecp256k1_v0_10_0_context_destroy(my_ctx_fresh); /* Defined as no-op. 
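* Destroying a NULL context is documented to be harmless, mirroring free(NULL).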
*/ - rustsecp256k1_v0_9_2_context_destroy(NULL); - rustsecp256k1_v0_9_2_context_preallocated_destroy(NULL); + rustsecp256k1_v0_10_0_context_destroy(NULL); + rustsecp256k1_v0_10_0_context_preallocated_destroy(NULL); } static void run_scratch_tests(void) { const size_t adj_alloc = ((500 + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT; - int32_t ecount = 0; size_t checkpoint; size_t checkpoint_2; - rustsecp256k1_v0_9_2_scratch_space *scratch; - rustsecp256k1_v0_9_2_scratch_space local_scratch; - - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_9_2_context_set_error_callback(CTX, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_10_0_scratch_space *scratch; + rustsecp256k1_v0_10_0_scratch_space local_scratch; /* Test public API */ - scratch = rustsecp256k1_v0_9_2_scratch_space_create(CTX, 1000); + scratch = rustsecp256k1_v0_10_0_scratch_space_create(CTX, 1000); CHECK(scratch != NULL); - CHECK(ecount == 0); /* Test internal API */ - CHECK(rustsecp256k1_v0_9_2_scratch_max_allocation(&CTX->error_callback, scratch, 0) == 1000); - CHECK(rustsecp256k1_v0_9_2_scratch_max_allocation(&CTX->error_callback, scratch, 1) == 1000 - (ALIGNMENT - 1)); + CHECK(rustsecp256k1_v0_10_0_scratch_max_allocation(&CTX->error_callback, scratch, 0) == 1000); + CHECK(rustsecp256k1_v0_10_0_scratch_max_allocation(&CTX->error_callback, scratch, 1) == 1000 - (ALIGNMENT - 1)); CHECK(scratch->alloc_size == 0); CHECK(scratch->alloc_size % ALIGNMENT == 0); /* Allocating 500 bytes succeeds */ - checkpoint = rustsecp256k1_v0_9_2_scratch_checkpoint(&CTX->error_callback, scratch); - CHECK(rustsecp256k1_v0_9_2_scratch_alloc(&CTX->error_callback, scratch, 500) != NULL); - CHECK(rustsecp256k1_v0_9_2_scratch_max_allocation(&CTX->error_callback, scratch, 0) == 1000 - adj_alloc); - CHECK(rustsecp256k1_v0_9_2_scratch_max_allocation(&CTX->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); + checkpoint = rustsecp256k1_v0_10_0_scratch_checkpoint(&CTX->error_callback, scratch); + CHECK(rustsecp256k1_v0_10_0_scratch_alloc(&CTX->error_callback, scratch, 500) != NULL); + CHECK(rustsecp256k1_v0_10_0_scratch_max_allocation(&CTX->error_callback, scratch, 0) == 1000 - adj_alloc); + CHECK(rustsecp256k1_v0_10_0_scratch_max_allocation(&CTX->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); CHECK(scratch->alloc_size != 0); CHECK(scratch->alloc_size % ALIGNMENT == 0); /* Allocating another 501 bytes fails */ - CHECK(rustsecp256k1_v0_9_2_scratch_alloc(&CTX->error_callback, scratch, 501) == NULL); - CHECK(rustsecp256k1_v0_9_2_scratch_max_allocation(&CTX->error_callback, scratch, 0) == 1000 - adj_alloc); - CHECK(rustsecp256k1_v0_9_2_scratch_max_allocation(&CTX->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); + CHECK(rustsecp256k1_v0_10_0_scratch_alloc(&CTX->error_callback, scratch, 501) == NULL); + CHECK(rustsecp256k1_v0_10_0_scratch_max_allocation(&CTX->error_callback, scratch, 0) == 1000 - adj_alloc); + CHECK(rustsecp256k1_v0_10_0_scratch_max_allocation(&CTX->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); CHECK(scratch->alloc_size != 0); CHECK(scratch->alloc_size % ALIGNMENT == 0); /* ...but it succeeds once we apply the checkpoint to undo it */ - rustsecp256k1_v0_9_2_scratch_apply_checkpoint(&CTX->error_callback, scratch, checkpoint); + rustsecp256k1_v0_10_0_scratch_apply_checkpoint(&CTX->error_callback, scratch, checkpoint); CHECK(scratch->alloc_size == 0); - 
CHECK(rustsecp256k1_v0_9_2_scratch_max_allocation(&CTX->error_callback, scratch, 0) == 1000); - CHECK(rustsecp256k1_v0_9_2_scratch_alloc(&CTX->error_callback, scratch, 500) != NULL); + CHECK(rustsecp256k1_v0_10_0_scratch_max_allocation(&CTX->error_callback, scratch, 0) == 1000); + CHECK(rustsecp256k1_v0_10_0_scratch_alloc(&CTX->error_callback, scratch, 500) != NULL); CHECK(scratch->alloc_size != 0); /* try to apply a bad checkpoint */ - checkpoint_2 = rustsecp256k1_v0_9_2_scratch_checkpoint(&CTX->error_callback, scratch); - rustsecp256k1_v0_9_2_scratch_apply_checkpoint(&CTX->error_callback, scratch, checkpoint); - CHECK(ecount == 0); - rustsecp256k1_v0_9_2_scratch_apply_checkpoint(&CTX->error_callback, scratch, checkpoint_2); /* checkpoint_2 is after checkpoint */ - CHECK(ecount == 1); - rustsecp256k1_v0_9_2_scratch_apply_checkpoint(&CTX->error_callback, scratch, (size_t) -1); /* this is just wildly invalid */ - CHECK(ecount == 2); + checkpoint_2 = rustsecp256k1_v0_10_0_scratch_checkpoint(&CTX->error_callback, scratch); + rustsecp256k1_v0_10_0_scratch_apply_checkpoint(&CTX->error_callback, scratch, checkpoint); + CHECK_ERROR_VOID(CTX, rustsecp256k1_v0_10_0_scratch_apply_checkpoint(&CTX->error_callback, scratch, checkpoint_2)); /* checkpoint_2 is after checkpoint */ + CHECK_ERROR_VOID(CTX, rustsecp256k1_v0_10_0_scratch_apply_checkpoint(&CTX->error_callback, scratch, (size_t) -1)); /* this is just wildly invalid */ /* try to use badly initialized scratch space */ - rustsecp256k1_v0_9_2_scratch_space_destroy(CTX, scratch); + rustsecp256k1_v0_10_0_scratch_space_destroy(CTX, scratch); memset(&local_scratch, 0, sizeof(local_scratch)); scratch = &local_scratch; - CHECK(!rustsecp256k1_v0_9_2_scratch_max_allocation(&CTX->error_callback, scratch, 0)); - CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_9_2_scratch_alloc(&CTX->error_callback, scratch, 500) == NULL); - CHECK(ecount == 4); - rustsecp256k1_v0_9_2_scratch_space_destroy(CTX, scratch); - CHECK(ecount == 5); + CHECK_ERROR(CTX, rustsecp256k1_v0_10_0_scratch_max_allocation(&CTX->error_callback, scratch, 0)); + CHECK_ERROR(CTX, rustsecp256k1_v0_10_0_scratch_alloc(&CTX->error_callback, scratch, 500)); + CHECK_ERROR_VOID(CTX, rustsecp256k1_v0_10_0_scratch_space_destroy(CTX, scratch)); /* Test that large integers do not wrap around in a bad way */ - scratch = rustsecp256k1_v0_9_2_scratch_space_create(CTX, 1000); + scratch = rustsecp256k1_v0_10_0_scratch_space_create(CTX, 1000); /* Try max allocation with a large number of objects. Only makes sense if * ALIGNMENT is greater than 1 because otherwise the objects take no extra * space. */ - CHECK(ALIGNMENT <= 1 || !rustsecp256k1_v0_9_2_scratch_max_allocation(&CTX->error_callback, scratch, (SIZE_MAX / (ALIGNMENT - 1)) + 1)); + CHECK(ALIGNMENT <= 1 || !rustsecp256k1_v0_10_0_scratch_max_allocation(&CTX->error_callback, scratch, (SIZE_MAX / (ALIGNMENT - 1)) + 1)); /* Try allocating SIZE_MAX to test wrap around which only happens if * ALIGNMENT > 1, otherwise it returns NULL anyway because the scratch * space is too small. 
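* Rounding the requested size up to an ALIGNMENT boundary can overflow size_t, and the allocator must detect the wrap rather than treat it as a small request.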
*/ - CHECK(rustsecp256k1_v0_9_2_scratch_alloc(&CTX->error_callback, scratch, SIZE_MAX) == NULL); - rustsecp256k1_v0_9_2_scratch_space_destroy(CTX, scratch); + CHECK(rustsecp256k1_v0_10_0_scratch_alloc(&CTX->error_callback, scratch, SIZE_MAX) == NULL); + rustsecp256k1_v0_10_0_scratch_space_destroy(CTX, scratch); /* cleanup */ - rustsecp256k1_v0_9_2_scratch_space_destroy(CTX, NULL); /* no-op */ - - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, NULL, NULL); - rustsecp256k1_v0_9_2_context_set_error_callback(CTX, NULL, NULL); + rustsecp256k1_v0_10_0_scratch_space_destroy(CTX, NULL); /* no-op */ } static void run_ctz_tests(void) { @@ -578,14 +550,14 @@ static void run_ctz_tests(void) { unsigned i; for (i = 0; i < sizeof(b32) / sizeof(b32[0]); ++i) { for (shift = 0; shift < 32; ++shift) { - CHECK(rustsecp256k1_v0_9_2_ctz32_var_debruijn(b32[i] << shift) == shift); - CHECK(rustsecp256k1_v0_9_2_ctz32_var(b32[i] << shift) == shift); + CHECK(rustsecp256k1_v0_10_0_ctz32_var_debruijn(b32[i] << shift) == shift); + CHECK(rustsecp256k1_v0_10_0_ctz32_var(b32[i] << shift) == shift); } } for (i = 0; i < sizeof(b64) / sizeof(b64[0]); ++i) { for (shift = 0; shift < 64; ++shift) { - CHECK(rustsecp256k1_v0_9_2_ctz64_var_debruijn(b64[i] << shift) == shift); - CHECK(rustsecp256k1_v0_9_2_ctz64_var(b64[i] << shift) == shift); + CHECK(rustsecp256k1_v0_10_0_ctz64_var_debruijn(b64[i] << shift) == shift); + CHECK(rustsecp256k1_v0_10_0_ctz64_var(b64[i] << shift) == shift); } } } @@ -622,29 +594,29 @@ static void run_sha256_known_output_tests(void) { for (i = 0; i < ninputs; i++) { unsigned char out[32]; - rustsecp256k1_v0_9_2_sha256 hasher; + rustsecp256k1_v0_10_0_sha256 hasher; unsigned int j; /* 1. Run: simply write the input bytestrings */ j = repeat[i]; - rustsecp256k1_v0_9_2_sha256_initialize(&hasher); + rustsecp256k1_v0_10_0_sha256_initialize(&hasher); while (j > 0) { - rustsecp256k1_v0_9_2_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); + rustsecp256k1_v0_10_0_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); j--; } - rustsecp256k1_v0_9_2_sha256_finalize(&hasher, out); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(out, outputs[i], 32) == 0); + rustsecp256k1_v0_10_0_sha256_finalize(&hasher, out); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(out, outputs[i], 32) == 0); /* 2. 
Run: split the input bytestrings randomly before writing */ if (strlen(inputs[i]) > 0) { - int split = rustsecp256k1_v0_9_2_testrand_int(strlen(inputs[i])); - rustsecp256k1_v0_9_2_sha256_initialize(&hasher); + int split = rustsecp256k1_v0_10_0_testrand_int(strlen(inputs[i])); + rustsecp256k1_v0_10_0_sha256_initialize(&hasher); j = repeat[i]; while (j > 0) { - rustsecp256k1_v0_9_2_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); - rustsecp256k1_v0_9_2_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); + rustsecp256k1_v0_10_0_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); + rustsecp256k1_v0_10_0_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); j--; } - rustsecp256k1_v0_9_2_sha256_finalize(&hasher, out); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(out, outputs[i], 32) == 0); + rustsecp256k1_v0_10_0_sha256_finalize(&hasher, out); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(out, outputs[i], 32) == 0); } } } @@ -695,7 +667,7 @@ for x in digests: */ static void run_sha256_counter_tests(void) { static const char *input = "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno"; - static const rustsecp256k1_v0_9_2_sha256 midstates[] = { + static const rustsecp256k1_v0_10_0_sha256 midstates[] = { {{0xa2b5c8bb, 0x26c88bb3, 0x2abdc3d2, 0x9def99a3, 0xdfd21a6e, 0x41fe585b, 0x7ef2c440, 0x2b79adda}, {0x00}, 0xfffc0}, {{0xa0d29445, 0x9287de66, 0x76aabd71, 0x41acd765, 0x0c7528b4, 0x84e14906, 0x942faec6, 0xcc5a7b26}, @@ -744,22 +716,22 @@ static void run_sha256_counter_tests(void) { unsigned int i; for (i = 0; i < sizeof(midstates)/sizeof(midstates[0]); i++) { unsigned char out[32]; - rustsecp256k1_v0_9_2_sha256 hasher = midstates[i]; - rustsecp256k1_v0_9_2_sha256_write(&hasher, (const unsigned char*)input, strlen(input)); - rustsecp256k1_v0_9_2_sha256_finalize(&hasher, out); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(out, outputs[i], 32) == 0); + rustsecp256k1_v0_10_0_sha256 hasher = midstates[i]; + rustsecp256k1_v0_10_0_sha256_write(&hasher, (const unsigned char*)input, strlen(input)); + rustsecp256k1_v0_10_0_sha256_finalize(&hasher, out); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(out, outputs[i], 32) == 0); } } /* Tests for the equality of two sha256 structs. This function only produces a * correct result if an integer multiple of 64 many bytes have been written * into the hash functions. This function is used by some module tests. */ -static void test_sha256_eq(const rustsecp256k1_v0_9_2_sha256 *sha1, const rustsecp256k1_v0_9_2_sha256 *sha2) { +static void test_sha256_eq(const rustsecp256k1_v0_10_0_sha256 *sha1, const rustsecp256k1_v0_10_0_sha256 *sha2) { /* Is buffer fully consumed? 
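* Comparing the midstate words is only meaningful when no partial 64-byte block is pending, hence the bytes & 0x3F check.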
*/ CHECK((sha1->bytes & 0x3F) == 0); CHECK(sha1->bytes == sha2->bytes); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(sha1->s, sha2->s, sizeof(sha1->s)) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(sha1->s, sha2->s, sizeof(sha1->s)) == 0); } static void run_hmac_sha256_tests(void) { @@ -789,19 +761,19 @@ static void run_hmac_sha256_tests(void) { }; int i; for (i = 0; i < 6; i++) { - rustsecp256k1_v0_9_2_hmac_sha256 hasher; + rustsecp256k1_v0_10_0_hmac_sha256 hasher; unsigned char out[32]; - rustsecp256k1_v0_9_2_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); - rustsecp256k1_v0_9_2_hmac_sha256_finalize(&hasher, out); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(out, outputs[i], 32) == 0); + rustsecp256k1_v0_10_0_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); + rustsecp256k1_v0_10_0_hmac_sha256_finalize(&hasher, out); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(out, outputs[i], 32) == 0); if (strlen(inputs[i]) > 0) { - int split = rustsecp256k1_v0_9_2_testrand_int(strlen(inputs[i])); - rustsecp256k1_v0_9_2_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); - rustsecp256k1_v0_9_2_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); - rustsecp256k1_v0_9_2_hmac_sha256_finalize(&hasher, out); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(out, outputs[i], 32) == 0); + int split = rustsecp256k1_v0_10_0_testrand_int(strlen(inputs[i])); + rustsecp256k1_v0_10_0_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); + rustsecp256k1_v0_10_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); + rustsecp256k1_v0_10_0_hmac_sha256_finalize(&hasher, out); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(out, outputs[i], 32) == 0); } } } @@ -821,34 +793,33 @@ static void run_rfc6979_hmac_sha256_tests(void) { {0x75, 0x97, 0x88, 0x7c, 0xbd, 0x76, 0x32, 0x1f, 0x32, 0xe3, 0x04, 0x40, 0x67, 0x9a, 0x22, 0xcf, 0x7f, 0x8d, 0x9d, 0x2e, 0xac, 0x39, 0x0e, 0x58, 0x1f, 0xea, 0x09, 0x1c, 0xe2, 0x02, 0xba, 0x94} }; - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256 rng; unsigned char out[32]; int i; - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_initialize(&rng, key1, 64); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_initialize(&rng, key1, 64); for (i = 0; i < 3; i++) { - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(out, out1[i], 32) == 0); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_generate(&rng, out, 32); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(out, out1[i], 32) == 0); } - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_finalize(&rng); - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_initialize(&rng, key1, 65); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_initialize(&rng, key1, 65); for (i = 0; i < 3; i++) { - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(out, out1[i], 32) != 0); + 
rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_generate(&rng, out, 32); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(out, out1[i], 32) != 0); } - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_finalize(&rng); - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_initialize(&rng, key2, 64); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_initialize(&rng, key2, 64); for (i = 0; i < 3; i++) { - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(out, out2[i], 32) == 0); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_generate(&rng, out, 32); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(out, out2[i], 32) == 0); } - rustsecp256k1_v0_9_2_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_finalize(&rng); } static void run_tagged_sha256_tests(void) { - int ecount = 0; unsigned char tag[32] = { 0 }; unsigned char msg[32] = { 0 }; unsigned char hash32[32]; @@ -859,22 +830,17 @@ static void run_tagged_sha256_tests(void) { 0xE2, 0x76, 0x55, 0x9A, 0x3B, 0xDE, 0x55, 0xB3 }; - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, counting_illegal_callback_fn, &ecount); - /* API test */ - CHECK(rustsecp256k1_v0_9_2_tagged_sha256(CTX, hash32, tag, sizeof(tag), msg, sizeof(msg)) == 1); - CHECK(rustsecp256k1_v0_9_2_tagged_sha256(CTX, NULL, tag, sizeof(tag), msg, sizeof(msg)) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_tagged_sha256(CTX, hash32, NULL, 0, msg, sizeof(msg)) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_tagged_sha256(CTX, hash32, tag, sizeof(tag), NULL, 0) == 0); - CHECK(ecount == 3); + CHECK(rustsecp256k1_v0_10_0_tagged_sha256(CTX, hash32, tag, sizeof(tag), msg, sizeof(msg)) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_tagged_sha256(CTX, NULL, tag, sizeof(tag), msg, sizeof(msg))); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_tagged_sha256(CTX, hash32, NULL, 0, msg, sizeof(msg))); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_tagged_sha256(CTX, hash32, tag, sizeof(tag), NULL, 0)); /* Static test vector */ memcpy(tag, "tag", 3); memcpy(msg, "msg", 3); - CHECK(rustsecp256k1_v0_9_2_tagged_sha256(CTX, hash32, tag, 3, msg, 3) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(hash32, hash_expected, sizeof(hash32)) == 0); + CHECK(rustsecp256k1_v0_10_0_tagged_sha256(CTX, hash32, tag, 3, msg, 3) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(hash32, hash_expected, sizeof(hash32)) == 0); } /***** MODINV TESTS *****/ @@ -981,7 +947,7 @@ static void mulmod256(uint16_t* out, const uint16_t* a, const uint16_t* b, const } /* Convert a 256-bit number represented as 16 uint16_t's to signed30 notation. */ -static void uint16_to_signed30(rustsecp256k1_v0_9_2_modinv32_signed30* out, const uint16_t* in) { +static void uint16_to_signed30(rustsecp256k1_v0_10_0_modinv32_signed30* out, const uint16_t* in) { int i; memset(out->v, 0, sizeof(out->v)); for (i = 0; i < 256; ++i) { @@ -990,7 +956,7 @@ static void uint16_to_signed30(rustsecp256k1_v0_9_2_modinv32_signed30* out, cons } /* Convert a 256-bit number in signed30 notation to a representation as 16 uint16_t's. 
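* The signed30 form stores the value in nine 30-bit limbs; this is the inverse of uint16_to_signed30 above.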
*/ -static void signed30_to_uint16(uint16_t* out, const rustsecp256k1_v0_9_2_modinv32_signed30* in) { +static void signed30_to_uint16(uint16_t* out, const rustsecp256k1_v0_10_0_modinv32_signed30* in) { int i; memset(out, 0, 32); for (i = 0; i < 256; ++i) { @@ -999,10 +965,10 @@ static void signed30_to_uint16(uint16_t* out, const rustsecp256k1_v0_9_2_modinv3 } /* Randomly mutate the sign of limbs in signed30 representation, without changing the value. */ -static void mutate_sign_signed30(rustsecp256k1_v0_9_2_modinv32_signed30* x) { +static void mutate_sign_signed30(rustsecp256k1_v0_10_0_modinv32_signed30* x) { int i; for (i = 0; i < 16; ++i) { - int pos = rustsecp256k1_v0_9_2_testrand_bits(3); + int pos = rustsecp256k1_v0_10_0_testrand_bits(3); if (x->v[pos] > 0 && x->v[pos + 1] <= 0x3fffffff) { x->v[pos] -= 0x40000000; x->v[pos + 1] += 1; @@ -1013,11 +979,11 @@ static void mutate_sign_signed30(rustsecp256k1_v0_9_2_modinv32_signed30* x) { } } -/* Test rustsecp256k1_v0_9_2_modinv32{_var}, using inputs in 16-bit limb format, and returning inverse. */ +/* Test rustsecp256k1_v0_10_0_modinv32{_var}, using inputs in 16-bit limb format, and returning inverse. */ static void test_modinv32_uint16(uint16_t* out, const uint16_t* in, const uint16_t* mod) { uint16_t tmp[16]; - rustsecp256k1_v0_9_2_modinv32_signed30 x; - rustsecp256k1_v0_9_2_modinv32_modinfo m; + rustsecp256k1_v0_10_0_modinv32_signed30 x; + rustsecp256k1_v0_10_0_modinv32_modinfo m; int i, vartime, nonzero; uint16_to_signed30(&x, in); @@ -1028,14 +994,14 @@ static void test_modinv32_uint16(uint16_t* out, const uint16_t* in, const uint16 m.modulus_inv30 = modinv2p64(m.modulus.v[0]) & 0x3fffffff; CHECK(((m.modulus_inv30 * m.modulus.v[0]) & 0x3fffffff) == 1); - /* Test rustsecp256k1_v0_9_2_jacobi32_maybe_var. */ + /* Test rustsecp256k1_v0_10_0_jacobi32_maybe_var. */ if (nonzero) { int jac; uint16_t sqr[16], negone[16]; mulmod256(sqr, in, in, mod); uint16_to_signed30(&x, sqr); /* Compute jacobi symbol of in^2, which must be 1 (or uncomputable). */ - jac = rustsecp256k1_v0_9_2_jacobi32_maybe_var(&x, &m); + jac = rustsecp256k1_v0_10_0_jacobi32_maybe_var(&x, &m); CHECK(jac == 0 || jac == 1); /* Then compute the jacobi symbol of -(in^2). x and -x have opposite * jacobi symbols if and only if (mod % 4) == 3. */ @@ -1043,7 +1009,7 @@ static void test_modinv32_uint16(uint16_t* out, const uint16_t* in, const uint16 for (i = 1; i < 16; ++i) negone[i] = mod[i]; mulmod256(sqr, sqr, negone, mod); uint16_to_signed30(&x, sqr); - jac = rustsecp256k1_v0_9_2_jacobi32_maybe_var(&x, &m); + jac = rustsecp256k1_v0_10_0_jacobi32_maybe_var(&x, &m); CHECK(jac == 0 || jac == 1 - (mod[0] & 2)); } @@ -1051,7 +1017,7 @@ static void test_modinv32_uint16(uint16_t* out, const uint16_t* in, const uint16 mutate_sign_signed30(&m.modulus); for (vartime = 0; vartime < 2; ++vartime) { /* compute inverse */ - (vartime ? rustsecp256k1_v0_9_2_modinv32_var : rustsecp256k1_v0_9_2_modinv32)(&x, &m); + (vartime ? rustsecp256k1_v0_10_0_modinv32_var : rustsecp256k1_v0_10_0_modinv32)(&x, &m); /* produce output */ signed30_to_uint16(out, &x); @@ -1062,7 +1028,7 @@ static void test_modinv32_uint16(uint16_t* out, const uint16_t* in, const uint16 for (i = 1; i < 16; ++i) CHECK(tmp[i] == 0); /* invert again */ - (vartime ? rustsecp256k1_v0_9_2_modinv32_var : rustsecp256k1_v0_9_2_modinv32)(&x, &m); + (vartime ? 
rustsecp256k1_v0_10_0_modinv32_var : rustsecp256k1_v0_10_0_modinv32)(&x, &m); /* check if the result is equal to the input */ signed30_to_uint16(tmp, &x); @@ -1072,7 +1038,7 @@ static void test_modinv32_uint16(uint16_t* out, const uint16_t* in, const uint16 #ifdef SECP256K1_WIDEMUL_INT128 /* Convert a 256-bit number represented as 16 uint16_t's to signed62 notation. */ -static void uint16_to_signed62(rustsecp256k1_v0_9_2_modinv64_signed62* out, const uint16_t* in) { +static void uint16_to_signed62(rustsecp256k1_v0_10_0_modinv64_signed62* out, const uint16_t* in) { int i; memset(out->v, 0, sizeof(out->v)); for (i = 0; i < 256; ++i) { @@ -1081,7 +1047,7 @@ static void uint16_to_signed62(rustsecp256k1_v0_9_2_modinv64_signed62* out, cons } /* Convert a 256-bit number in signed62 notation to a representation as 16 uint16_t's. */ -static void signed62_to_uint16(uint16_t* out, const rustsecp256k1_v0_9_2_modinv64_signed62* in) { +static void signed62_to_uint16(uint16_t* out, const rustsecp256k1_v0_10_0_modinv64_signed62* in) { int i; memset(out, 0, 32); for (i = 0; i < 256; ++i) { @@ -1090,11 +1056,11 @@ static void signed62_to_uint16(uint16_t* out, const rustsecp256k1_v0_9_2_modinv6 } /* Randomly mutate the sign of limbs in signed62 representation, without changing the value. */ -static void mutate_sign_signed62(rustsecp256k1_v0_9_2_modinv64_signed62* x) { +static void mutate_sign_signed62(rustsecp256k1_v0_10_0_modinv64_signed62* x) { static const int64_t M62 = (int64_t)(UINT64_MAX >> 2); int i; for (i = 0; i < 8; ++i) { - int pos = rustsecp256k1_v0_9_2_testrand_bits(2); + int pos = rustsecp256k1_v0_10_0_testrand_bits(2); if (x->v[pos] > 0 && x->v[pos + 1] <= M62) { x->v[pos] -= (M62 + 1); x->v[pos + 1] += 1; @@ -1105,12 +1071,12 @@ static void mutate_sign_signed62(rustsecp256k1_v0_9_2_modinv64_signed62* x) { } } -/* Test rustsecp256k1_v0_9_2_modinv64{_var}, using inputs in 16-bit limb format, and returning inverse. */ +/* Test rustsecp256k1_v0_10_0_modinv64{_var}, using inputs in 16-bit limb format, and returning inverse. */ static void test_modinv64_uint16(uint16_t* out, const uint16_t* in, const uint16_t* mod) { static const int64_t M62 = (int64_t)(UINT64_MAX >> 2); uint16_t tmp[16]; - rustsecp256k1_v0_9_2_modinv64_signed62 x; - rustsecp256k1_v0_9_2_modinv64_modinfo m; + rustsecp256k1_v0_10_0_modinv64_signed62 x; + rustsecp256k1_v0_10_0_modinv64_modinfo m; int i, vartime, nonzero; uint16_to_signed62(&x, in); @@ -1121,14 +1087,14 @@ static void test_modinv64_uint16(uint16_t* out, const uint16_t* in, const uint16 m.modulus_inv62 = modinv2p64(m.modulus.v[0]) & M62; CHECK(((m.modulus_inv62 * m.modulus.v[0]) & M62) == 1); - /* Test rustsecp256k1_v0_9_2_jacobi64_maybe_var. */ + /* Test rustsecp256k1_v0_10_0_jacobi64_maybe_var. */ if (nonzero) { int jac; uint16_t sqr[16], negone[16]; mulmod256(sqr, in, in, mod); uint16_to_signed62(&x, sqr); /* Compute jacobi symbol of in^2, which must be 1 (or uncomputable). */ - jac = rustsecp256k1_v0_9_2_jacobi64_maybe_var(&x, &m); + jac = rustsecp256k1_v0_10_0_jacobi64_maybe_var(&x, &m); CHECK(jac == 0 || jac == 1); /* Then compute the jacobi symbol of -(in^2). x and -x have opposite * jacobi symbols if and only if (mod % 4) == 3. 
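Recall that for odd m the Jacobi symbol satisfies (-1 | m) = (-1)^((m-1)/2), which is -1 exactly when m = 3 (mod 4). The expression 1 - (mod[0] & 2) in these checks encodes precisely that: it evaluates to -1 when bit 1 of the modulus is set (m = 3 mod 4) and to 1 otherwise.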
*/ @@ -1136,7 +1102,7 @@ static void test_modinv64_uint16(uint16_t* out, const uint16_t* in, const uint16 for (i = 1; i < 16; ++i) negone[i] = mod[i]; mulmod256(sqr, sqr, negone, mod); uint16_to_signed62(&x, sqr); - jac = rustsecp256k1_v0_9_2_jacobi64_maybe_var(&x, &m); + jac = rustsecp256k1_v0_10_0_jacobi64_maybe_var(&x, &m); CHECK(jac == 0 || jac == 1 - (mod[0] & 2)); } @@ -1144,7 +1110,7 @@ static void test_modinv64_uint16(uint16_t* out, const uint16_t* in, const uint16 mutate_sign_signed62(&m.modulus); for (vartime = 0; vartime < 2; ++vartime) { /* compute inverse */ - (vartime ? rustsecp256k1_v0_9_2_modinv64_var : rustsecp256k1_v0_9_2_modinv64)(&x, &m); + (vartime ? rustsecp256k1_v0_10_0_modinv64_var : rustsecp256k1_v0_10_0_modinv64)(&x, &m); /* produce output */ signed62_to_uint16(out, &x); @@ -1155,7 +1121,7 @@ static void test_modinv64_uint16(uint16_t* out, const uint16_t* in, const uint16 for (i = 1; i < 16; ++i) CHECK(tmp[i] == 0); /* invert again */ - (vartime ? rustsecp256k1_v0_9_2_modinv64_var : rustsecp256k1_v0_9_2_modinv64)(&x, &m); + (vartime ? rustsecp256k1_v0_10_0_modinv64_var : rustsecp256k1_v0_10_0_modinv64)(&x, &m); /* check if the result is equal to the input */ signed62_to_uint16(tmp, &x); @@ -1807,8 +1773,8 @@ static void run_modinv_tests(void) { /* generate random xd and md, so that md is odd, md>1, xd> (j % 16)) uwa_bits = 1 + j; } for (j = 0; j < 128; ++j) { - CHECK(rustsecp256k1_v0_9_2_u128_check_bits(&uwa, j) == (uwa_bits <= j)); + CHECK(rustsecp256k1_v0_10_0_u128_check_bits(&uwa, j) == (uwa_bits <= j)); } } - /* test rustsecp256k1_v0_9_2_i128_mul */ + /* test rustsecp256k1_v0_10_0_i128_mul */ mulmod256(rswr, rsb, rsc, NULL); - rustsecp256k1_v0_9_2_i128_mul(&swz, sb, sc); + rustsecp256k1_v0_10_0_i128_mul(&swz, sb, sc); load256i128(rswz, &swz); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(rswr, rswz, 16) == 0); - /* test rustsecp256k1_v0_9_2_i128_accum_mul */ + CHECK(rustsecp256k1_v0_10_0_memcmp_var(rswr, rswz, 16) == 0); + /* test rustsecp256k1_v0_10_0_i128_accum_mul */ mulmod256(rswr, rsb, rsc, NULL); add256(rswr, rswr, rswa); if (int256is127(rswr)) { swz = swa; - rustsecp256k1_v0_9_2_i128_accum_mul(&swz, sb, sc); + rustsecp256k1_v0_10_0_i128_accum_mul(&swz, sb, sc); load256i128(rswz, &swz); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(rswr, rswz, 16) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(rswr, rswz, 16) == 0); } - /* test rustsecp256k1_v0_9_2_i128_det */ + /* test rustsecp256k1_v0_10_0_i128_det */ { uint16_t rsd[16], rse[16], rst[32]; int64_t sd = v[0], se = v[1]; @@ -2029,25 +1995,25 @@ static void run_int128_test_case(void) { neg256(rst, rst); mulmod256(rswr, rsb, rse, NULL); add256(rswr, rswr, rst); - rustsecp256k1_v0_9_2_i128_det(&swz, sb, sc, sd, se); + rustsecp256k1_v0_10_0_i128_det(&swz, sb, sc, sd, se); load256i128(rswz, &swz); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(rswr, rswz, 16) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(rswr, rswz, 16) == 0); } - /* test rustsecp256k1_v0_9_2_i128_rshift */ + /* test rustsecp256k1_v0_10_0_i128_rshift */ rshift256(rswr, rswa, uc % 127, 1); swz = swa; - rustsecp256k1_v0_9_2_i128_rshift(&swz, uc % 127); + rustsecp256k1_v0_10_0_i128_rshift(&swz, uc % 127); load256i128(rswz, &swz); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(rswr, rswz, 16) == 0); - /* test rustsecp256k1_v0_9_2_i128_to_u64 */ - CHECK(rustsecp256k1_v0_9_2_i128_to_u64(&swa) == v[0]); - /* test rustsecp256k1_v0_9_2_i128_from_i64 */ - rustsecp256k1_v0_9_2_i128_from_i64(&swz, sb); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(rswr, rswz, 16) == 0); + /* test 
rustsecp256k1_v0_10_0_i128_to_u64 */ + CHECK(rustsecp256k1_v0_10_0_i128_to_u64(&swa) == v[0]); + /* test rustsecp256k1_v0_10_0_i128_from_i64 */ + rustsecp256k1_v0_10_0_i128_from_i64(&swz, sb); load256i128(rswz, &swz); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(rsb, rswz, 16) == 0); - /* test rustsecp256k1_v0_9_2_i128_to_i64 */ - CHECK(rustsecp256k1_v0_9_2_i128_to_i64(&swz) == sb); - /* test rustsecp256k1_v0_9_2_i128_eq_var */ + CHECK(rustsecp256k1_v0_10_0_memcmp_var(rsb, rswz, 16) == 0); + /* test rustsecp256k1_v0_10_0_i128_to_i64 */ + CHECK(rustsecp256k1_v0_10_0_i128_to_i64(&swz) == sb); + /* test rustsecp256k1_v0_10_0_i128_eq_var */ { int expect = (uc & 1); swz = swa; @@ -2059,11 +2025,11 @@ static void run_int128_test_case(void) { } else { v0c ^= (((uint64_t)1) << (ub & 63)); } - rustsecp256k1_v0_9_2_i128_load(&swz, v1c, v0c); + rustsecp256k1_v0_10_0_i128_load(&swz, v1c, v0c); } - CHECK(rustsecp256k1_v0_9_2_i128_eq_var(&swa, &swz) == expect); + CHECK(rustsecp256k1_v0_10_0_i128_eq_var(&swa, &swz) == expect); } - /* test rustsecp256k1_v0_9_2_i128_check_pow2 (sign == 1) */ + /* test rustsecp256k1_v0_10_0_i128_check_pow2 (sign == 1) */ { int expect = (uc & 1); int pos = ub % 127; @@ -2076,7 +2042,7 @@ static void run_int128_test_case(void) { } else { lo = (((uint64_t)1) << (pos & 63)); } - rustsecp256k1_v0_9_2_i128_load(&swz, hi, lo); + rustsecp256k1_v0_10_0_i128_load(&swz, hi, lo); } else { /* If expect==0, set swz = swa, but update expect=1 if swa happens to equal 2^pos. */ if (pos >= 64) { @@ -2086,9 +2052,9 @@ static void run_int128_test_case(void) { } swz = swa; } - CHECK(rustsecp256k1_v0_9_2_i128_check_pow2(&swz, pos, 1) == expect); + CHECK(rustsecp256k1_v0_10_0_i128_check_pow2(&swz, pos, 1) == expect); } - /* test rustsecp256k1_v0_9_2_i128_check_pow2 (sign == -1) */ + /* test rustsecp256k1_v0_10_0_i128_check_pow2 (sign == -1) */ { int expect = (uc & 1); int pos = ub % 127; @@ -2102,7 +2068,7 @@ static void run_int128_test_case(void) { } else { lo <<= (pos & 63); } - rustsecp256k1_v0_9_2_i128_load(&swz, hi, lo); + rustsecp256k1_v0_10_0_i128_load(&swz, hi, lo); } else { /* If expect==0, set swz = swa, but update expect=1 if swa happens to equal -2^pos. 
*/ if (pos >= 64) { @@ -2112,42 +2078,42 @@ static void run_int128_test_case(void) { } swz = swa; } - CHECK(rustsecp256k1_v0_9_2_i128_check_pow2(&swz, pos, -1) == expect); + CHECK(rustsecp256k1_v0_10_0_i128_check_pow2(&swz, pos, -1) == expect); } } static void run_int128_tests(void) { - { /* rustsecp256k1_v0_9_2_u128_accum_mul */ - rustsecp256k1_v0_9_2_uint128 res; - - /* Check rustsecp256k1_v0_9_2_u128_accum_mul overflow */ - rustsecp256k1_v0_9_2_u128_mul(&res, UINT64_MAX, UINT64_MAX); - rustsecp256k1_v0_9_2_u128_accum_mul(&res, UINT64_MAX, UINT64_MAX); - CHECK(rustsecp256k1_v0_9_2_u128_to_u64(&res) == 2); - CHECK(rustsecp256k1_v0_9_2_u128_hi_u64(&res) == 18446744073709551612U); - } - { /* rustsecp256k1_v0_9_2_u128_accum_mul */ - rustsecp256k1_v0_9_2_int128 res; - - /* Compute INT128_MAX = 2^127 - 1 with rustsecp256k1_v0_9_2_i128_accum_mul */ - rustsecp256k1_v0_9_2_i128_mul(&res, INT64_MAX, INT64_MAX); - rustsecp256k1_v0_9_2_i128_accum_mul(&res, INT64_MAX, INT64_MAX); - CHECK(rustsecp256k1_v0_9_2_i128_to_u64(&res) == 2); - rustsecp256k1_v0_9_2_i128_accum_mul(&res, 4, 9223372036854775807); - rustsecp256k1_v0_9_2_i128_accum_mul(&res, 1, 1); - CHECK(rustsecp256k1_v0_9_2_i128_to_u64(&res) == UINT64_MAX); - rustsecp256k1_v0_9_2_i128_rshift(&res, 64); - CHECK(rustsecp256k1_v0_9_2_i128_to_i64(&res) == INT64_MAX); - - /* Compute INT128_MIN = - 2^127 with rustsecp256k1_v0_9_2_i128_accum_mul */ - rustsecp256k1_v0_9_2_i128_mul(&res, INT64_MAX, INT64_MIN); - CHECK(rustsecp256k1_v0_9_2_i128_to_u64(&res) == (uint64_t)INT64_MIN); - rustsecp256k1_v0_9_2_i128_accum_mul(&res, INT64_MAX, INT64_MIN); - CHECK(rustsecp256k1_v0_9_2_i128_to_u64(&res) == 0); - rustsecp256k1_v0_9_2_i128_accum_mul(&res, 2, INT64_MIN); - CHECK(rustsecp256k1_v0_9_2_i128_to_u64(&res) == 0); - rustsecp256k1_v0_9_2_i128_rshift(&res, 64); - CHECK(rustsecp256k1_v0_9_2_i128_to_i64(&res) == INT64_MIN); + { /* rustsecp256k1_v0_10_0_u128_accum_mul */ + rustsecp256k1_v0_10_0_uint128 res; + + /* Check rustsecp256k1_v0_10_0_u128_accum_mul overflow */ + rustsecp256k1_v0_10_0_u128_mul(&res, UINT64_MAX, UINT64_MAX); + rustsecp256k1_v0_10_0_u128_accum_mul(&res, UINT64_MAX, UINT64_MAX); + CHECK(rustsecp256k1_v0_10_0_u128_to_u64(&res) == 2); + CHECK(rustsecp256k1_v0_10_0_u128_hi_u64(&res) == 18446744073709551612U); + } + { /* rustsecp256k1_v0_10_0_u128_accum_mul */ + rustsecp256k1_v0_10_0_int128 res; + + /* Compute INT128_MAX = 2^127 - 1 with rustsecp256k1_v0_10_0_i128_accum_mul */ + rustsecp256k1_v0_10_0_i128_mul(&res, INT64_MAX, INT64_MAX); + rustsecp256k1_v0_10_0_i128_accum_mul(&res, INT64_MAX, INT64_MAX); + CHECK(rustsecp256k1_v0_10_0_i128_to_u64(&res) == 2); + rustsecp256k1_v0_10_0_i128_accum_mul(&res, 4, 9223372036854775807); + rustsecp256k1_v0_10_0_i128_accum_mul(&res, 1, 1); + CHECK(rustsecp256k1_v0_10_0_i128_to_u64(&res) == UINT64_MAX); + rustsecp256k1_v0_10_0_i128_rshift(&res, 64); + CHECK(rustsecp256k1_v0_10_0_i128_to_i64(&res) == INT64_MAX); + + /* Compute INT128_MIN = - 2^127 with rustsecp256k1_v0_10_0_i128_accum_mul */ + rustsecp256k1_v0_10_0_i128_mul(&res, INT64_MAX, INT64_MIN); + CHECK(rustsecp256k1_v0_10_0_i128_to_u64(&res) == (uint64_t)INT64_MIN); + rustsecp256k1_v0_10_0_i128_accum_mul(&res, INT64_MAX, INT64_MIN); + CHECK(rustsecp256k1_v0_10_0_i128_to_u64(&res) == 0); + rustsecp256k1_v0_10_0_i128_accum_mul(&res, 2, INT64_MIN); + CHECK(rustsecp256k1_v0_10_0_i128_to_u64(&res) == 0); + rustsecp256k1_v0_10_0_i128_rshift(&res, 64); + CHECK(rustsecp256k1_v0_10_0_i128_to_i64(&res) == INT64_MIN); } { /* Randomized tests. 
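For reference, the INT128_MAX computation above is exact: 2*(2^63 - 1)^2 + 4*(2^63 - 1) + 1 = (2^127 - 2^65 + 2) + (2^65 - 4) + 1 = 2^127 - 1, so the low word is all ones and the high word is INT64_MAX. A minimal standalone cross-check of the same arithmetic, assuming a compiler with native __int128 support (an illustrative sketch, not part of the vendored tests):

    #include <assert.h>
    #include <stdint.h>
    static void int128_max_sketch(void) {
        __int128 r = (__int128)INT64_MAX * INT64_MAX; /* 2^126 - 2^64 + 1 */
        r += (__int128)INT64_MAX * INT64_MAX;         /* 2^127 - 2^65 + 2 */
        r += (__int128)4 * INT64_MAX;                 /* 2^127 - 2        */
        r += 1;                                       /* 2^127 - 1 = INT128_MAX */
        assert((uint64_t)r == UINT64_MAX);
        assert((int64_t)(r >> 64) == INT64_MAX);
    }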
*/ @@ -2160,9 +2126,9 @@ static void run_int128_tests(void) { /***** SCALAR TESTS *****/ static void scalar_test(void) { - rustsecp256k1_v0_9_2_scalar s; - rustsecp256k1_v0_9_2_scalar s1; - rustsecp256k1_v0_9_2_scalar s2; + rustsecp256k1_v0_10_0_scalar s; + rustsecp256k1_v0_10_0_scalar s1; + rustsecp256k1_v0_10_0_scalar s2; unsigned char c[32]; /* Set 's' to a random scalar, with value 'snum'. */ @@ -2173,169 +2139,162 @@ static void scalar_test(void) { /* Set 's2' to a random scalar, with value 'snum2', and byte array representation 'c'. */ random_scalar_order_test(&s2); - rustsecp256k1_v0_9_2_scalar_get_b32(c, &s2); + rustsecp256k1_v0_10_0_scalar_get_b32(c, &s2); { int i; /* Test that fetching groups of 4 bits from a scalar and recursing n(i)=16*n(i-1)+p(i) reconstructs it. */ - rustsecp256k1_v0_9_2_scalar n; - rustsecp256k1_v0_9_2_scalar_set_int(&n, 0); + rustsecp256k1_v0_10_0_scalar n; + rustsecp256k1_v0_10_0_scalar_set_int(&n, 0); for (i = 0; i < 256; i += 4) { - rustsecp256k1_v0_9_2_scalar t; + rustsecp256k1_v0_10_0_scalar t; int j; - rustsecp256k1_v0_9_2_scalar_set_int(&t, rustsecp256k1_v0_9_2_scalar_get_bits(&s, 256 - 4 - i, 4)); + rustsecp256k1_v0_10_0_scalar_set_int(&t, rustsecp256k1_v0_10_0_scalar_get_bits(&s, 256 - 4 - i, 4)); for (j = 0; j < 4; j++) { - rustsecp256k1_v0_9_2_scalar_add(&n, &n, &n); + rustsecp256k1_v0_10_0_scalar_add(&n, &n, &n); } - rustsecp256k1_v0_9_2_scalar_add(&n, &n, &t); + rustsecp256k1_v0_10_0_scalar_add(&n, &n, &t); } - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&n, &s)); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&n, &s)); } { /* Test that fetching groups of randomly-sized bits from a scalar and recursing n(i)=b*n(i-1)+p(i) reconstructs it. */ - rustsecp256k1_v0_9_2_scalar n; + rustsecp256k1_v0_10_0_scalar n; int i = 0; - rustsecp256k1_v0_9_2_scalar_set_int(&n, 0); + rustsecp256k1_v0_10_0_scalar_set_int(&n, 0); while (i < 256) { - rustsecp256k1_v0_9_2_scalar t; + rustsecp256k1_v0_10_0_scalar t; int j; - int now = rustsecp256k1_v0_9_2_testrand_int(15) + 1; + int now = rustsecp256k1_v0_10_0_testrand_int(15) + 1; if (now + i > 256) { now = 256 - i; } - rustsecp256k1_v0_9_2_scalar_set_int(&t, rustsecp256k1_v0_9_2_scalar_get_bits_var(&s, 256 - now - i, now)); + rustsecp256k1_v0_10_0_scalar_set_int(&t, rustsecp256k1_v0_10_0_scalar_get_bits_var(&s, 256 - now - i, now)); for (j = 0; j < now; j++) { - rustsecp256k1_v0_9_2_scalar_add(&n, &n, &n); + rustsecp256k1_v0_10_0_scalar_add(&n, &n, &n); } - rustsecp256k1_v0_9_2_scalar_add(&n, &n, &t); + rustsecp256k1_v0_10_0_scalar_add(&n, &n, &t); i += now; } - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&n, &s)); - } - - { - /* test rustsecp256k1_v0_9_2_scalar_shr_int */ - rustsecp256k1_v0_9_2_scalar r; - int i; - random_scalar_order_test(&r); - for (i = 0; i < 100; ++i) { - int low; - int shift = 1 + rustsecp256k1_v0_9_2_testrand_int(15); - int expected = r.d[0] % (1ULL << shift); - low = rustsecp256k1_v0_9_2_scalar_shr_int(&r, shift); - CHECK(expected == low); - } + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&n, &s)); } { /* Test commutativity of add. 
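Both reconstruction loops above are Horner evaluations: n is rebuilt most-significant window first as n = 2^k * n + (next k bits of s), so the final equality n == s holds exactly when every scalar_get_bits/scalar_get_bits_var window was returned correctly.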
*/ - rustsecp256k1_v0_9_2_scalar r1, r2; - rustsecp256k1_v0_9_2_scalar_add(&r1, &s1, &s2); - rustsecp256k1_v0_9_2_scalar_add(&r2, &s2, &s1); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_10_0_scalar r1, r2; + rustsecp256k1_v0_10_0_scalar_add(&r1, &s1, &s2); + rustsecp256k1_v0_10_0_scalar_add(&r2, &s2, &s1); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&r1, &r2)); } { - rustsecp256k1_v0_9_2_scalar r1, r2; - rustsecp256k1_v0_9_2_scalar b; + rustsecp256k1_v0_10_0_scalar r1, r2; + rustsecp256k1_v0_10_0_scalar b; int i; /* Test add_bit. */ - int bit = rustsecp256k1_v0_9_2_testrand_bits(8); - rustsecp256k1_v0_9_2_scalar_set_int(&b, 1); - CHECK(rustsecp256k1_v0_9_2_scalar_is_one(&b)); + int bit = rustsecp256k1_v0_10_0_testrand_bits(8); + rustsecp256k1_v0_10_0_scalar_set_int(&b, 1); + CHECK(rustsecp256k1_v0_10_0_scalar_is_one(&b)); for (i = 0; i < bit; i++) { - rustsecp256k1_v0_9_2_scalar_add(&b, &b, &b); + rustsecp256k1_v0_10_0_scalar_add(&b, &b, &b); } r1 = s1; r2 = s1; - if (!rustsecp256k1_v0_9_2_scalar_add(&r1, &r1, &b)) { + if (!rustsecp256k1_v0_10_0_scalar_add(&r1, &r1, &b)) { /* No overflow happened. */ - rustsecp256k1_v0_9_2_scalar_cadd_bit(&r2, bit, 1); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_10_0_scalar_cadd_bit(&r2, bit, 1); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&r1, &r2)); /* cadd is a noop when flag is zero */ - rustsecp256k1_v0_9_2_scalar_cadd_bit(&r2, bit, 0); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_10_0_scalar_cadd_bit(&r2, bit, 0); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&r1, &r2)); } } { /* Test commutativity of mul. */ - rustsecp256k1_v0_9_2_scalar r1, r2; - rustsecp256k1_v0_9_2_scalar_mul(&r1, &s1, &s2); - rustsecp256k1_v0_9_2_scalar_mul(&r2, &s2, &s1); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_10_0_scalar r1, r2; + rustsecp256k1_v0_10_0_scalar_mul(&r1, &s1, &s2); + rustsecp256k1_v0_10_0_scalar_mul(&r2, &s2, &s1); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&r1, &r2)); } { /* Test associativity of add. */ - rustsecp256k1_v0_9_2_scalar r1, r2; - rustsecp256k1_v0_9_2_scalar_add(&r1, &s1, &s2); - rustsecp256k1_v0_9_2_scalar_add(&r1, &r1, &s); - rustsecp256k1_v0_9_2_scalar_add(&r2, &s2, &s); - rustsecp256k1_v0_9_2_scalar_add(&r2, &s1, &r2); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_10_0_scalar r1, r2; + rustsecp256k1_v0_10_0_scalar_add(&r1, &s1, &s2); + rustsecp256k1_v0_10_0_scalar_add(&r1, &r1, &s); + rustsecp256k1_v0_10_0_scalar_add(&r2, &s2, &s); + rustsecp256k1_v0_10_0_scalar_add(&r2, &s1, &r2); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&r1, &r2)); } { /* Test associativity of mul. */ - rustsecp256k1_v0_9_2_scalar r1, r2; - rustsecp256k1_v0_9_2_scalar_mul(&r1, &s1, &s2); - rustsecp256k1_v0_9_2_scalar_mul(&r1, &r1, &s); - rustsecp256k1_v0_9_2_scalar_mul(&r2, &s2, &s); - rustsecp256k1_v0_9_2_scalar_mul(&r2, &s1, &r2); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_10_0_scalar r1, r2; + rustsecp256k1_v0_10_0_scalar_mul(&r1, &s1, &s2); + rustsecp256k1_v0_10_0_scalar_mul(&r1, &r1, &s); + rustsecp256k1_v0_10_0_scalar_mul(&r2, &s2, &s); + rustsecp256k1_v0_10_0_scalar_mul(&r2, &s1, &r2); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&r1, &r2)); } { /* Test distributitivity of mul over add. 
*/ - rustsecp256k1_v0_9_2_scalar r1, r2, t; - rustsecp256k1_v0_9_2_scalar_add(&r1, &s1, &s2); - rustsecp256k1_v0_9_2_scalar_mul(&r1, &r1, &s); - rustsecp256k1_v0_9_2_scalar_mul(&r2, &s1, &s); - rustsecp256k1_v0_9_2_scalar_mul(&t, &s2, &s); - rustsecp256k1_v0_9_2_scalar_add(&r2, &r2, &t); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_10_0_scalar r1, r2, t; + rustsecp256k1_v0_10_0_scalar_add(&r1, &s1, &s2); + rustsecp256k1_v0_10_0_scalar_mul(&r1, &r1, &s); + rustsecp256k1_v0_10_0_scalar_mul(&r2, &s1, &s); + rustsecp256k1_v0_10_0_scalar_mul(&t, &s2, &s); + rustsecp256k1_v0_10_0_scalar_add(&r2, &r2, &t); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&r1, &r2)); } { /* Test multiplicative identity. */ - rustsecp256k1_v0_9_2_scalar r1; - rustsecp256k1_v0_9_2_scalar_mul(&r1, &s1, &rustsecp256k1_v0_9_2_scalar_one); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&r1, &s1)); + rustsecp256k1_v0_10_0_scalar r1; + rustsecp256k1_v0_10_0_scalar_mul(&r1, &s1, &rustsecp256k1_v0_10_0_scalar_one); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&r1, &s1)); } { /* Test additive identity. */ - rustsecp256k1_v0_9_2_scalar r1; - rustsecp256k1_v0_9_2_scalar_add(&r1, &s1, &rustsecp256k1_v0_9_2_scalar_zero); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&r1, &s1)); + rustsecp256k1_v0_10_0_scalar r1; + rustsecp256k1_v0_10_0_scalar_add(&r1, &s1, &rustsecp256k1_v0_10_0_scalar_zero); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&r1, &s1)); } { /* Test zero product property. */ - rustsecp256k1_v0_9_2_scalar r1; - rustsecp256k1_v0_9_2_scalar_mul(&r1, &s1, &rustsecp256k1_v0_9_2_scalar_zero); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&r1, &rustsecp256k1_v0_9_2_scalar_zero)); + rustsecp256k1_v0_10_0_scalar r1; + rustsecp256k1_v0_10_0_scalar_mul(&r1, &s1, &rustsecp256k1_v0_10_0_scalar_zero); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&r1, &rustsecp256k1_v0_10_0_scalar_zero)); } + { + /* Test halving. */ + rustsecp256k1_v0_10_0_scalar r; + rustsecp256k1_v0_10_0_scalar_add(&r, &s, &s); + rustsecp256k1_v0_10_0_scalar_half(&r, &r); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&r, &s)); + } } static void run_scalar_set_b32_seckey_tests(void) { unsigned char b32[32]; - rustsecp256k1_v0_9_2_scalar s1; - rustsecp256k1_v0_9_2_scalar s2; + rustsecp256k1_v0_10_0_scalar s1; + rustsecp256k1_v0_10_0_scalar s2; /* Usually set_b32 and set_b32_seckey give the same result */ random_scalar_order_b32(b32); - rustsecp256k1_v0_9_2_scalar_set_b32(&s1, b32, NULL); - CHECK(rustsecp256k1_v0_9_2_scalar_set_b32_seckey(&s2, b32) == 1); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&s1, &s2) == 1); + rustsecp256k1_v0_10_0_scalar_set_b32(&s1, b32, NULL); + CHECK(rustsecp256k1_v0_10_0_scalar_set_b32_seckey(&s2, b32) == 1); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&s1, &s2) == 1); memset(b32, 0, sizeof(b32)); - CHECK(rustsecp256k1_v0_9_2_scalar_set_b32_seckey(&s2, b32) == 0); + CHECK(rustsecp256k1_v0_10_0_scalar_set_b32_seckey(&s2, b32) == 0); memset(b32, 0xFF, sizeof(b32)); - CHECK(rustsecp256k1_v0_9_2_scalar_set_b32_seckey(&s2, b32) == 0); + CHECK(rustsecp256k1_v0_10_0_scalar_set_b32_seckey(&s2, b32) == 0); } static void run_scalar_tests(void) { @@ -2348,36 +2307,68 @@ static void run_scalar_tests(void) { } { - /* Check that the scalar constants rustsecp256k1_v0_9_2_scalar_zero and - rustsecp256k1_v0_9_2_scalar_one contain the expected values. */ - rustsecp256k1_v0_9_2_scalar zero, one; + /* Check that the scalar constants rustsecp256k1_v0_10_0_scalar_zero and + rustsecp256k1_v0_10_0_scalar_one contain the expected values. 
*/ + rustsecp256k1_v0_10_0_scalar zero, one; - CHECK(rustsecp256k1_v0_9_2_scalar_is_zero(&rustsecp256k1_v0_9_2_scalar_zero)); - rustsecp256k1_v0_9_2_scalar_set_int(&zero, 0); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&zero, &rustsecp256k1_v0_9_2_scalar_zero)); + CHECK(rustsecp256k1_v0_10_0_scalar_is_zero(&rustsecp256k1_v0_10_0_scalar_zero)); + rustsecp256k1_v0_10_0_scalar_set_int(&zero, 0); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&zero, &rustsecp256k1_v0_10_0_scalar_zero)); - CHECK(rustsecp256k1_v0_9_2_scalar_is_one(&rustsecp256k1_v0_9_2_scalar_one)); - rustsecp256k1_v0_9_2_scalar_set_int(&one, 1); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&one, &rustsecp256k1_v0_9_2_scalar_one)); + CHECK(rustsecp256k1_v0_10_0_scalar_is_one(&rustsecp256k1_v0_10_0_scalar_one)); + rustsecp256k1_v0_10_0_scalar_set_int(&one, 1); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&one, &rustsecp256k1_v0_10_0_scalar_one)); } { /* (-1)+1 should be zero. */ - rustsecp256k1_v0_9_2_scalar o; - rustsecp256k1_v0_9_2_scalar_negate(&o, &rustsecp256k1_v0_9_2_scalar_one); - rustsecp256k1_v0_9_2_scalar_add(&o, &o, &rustsecp256k1_v0_9_2_scalar_one); - CHECK(rustsecp256k1_v0_9_2_scalar_is_zero(&o)); - rustsecp256k1_v0_9_2_scalar_negate(&o, &o); - CHECK(rustsecp256k1_v0_9_2_scalar_is_zero(&o)); + rustsecp256k1_v0_10_0_scalar o; + rustsecp256k1_v0_10_0_scalar_negate(&o, &rustsecp256k1_v0_10_0_scalar_one); + rustsecp256k1_v0_10_0_scalar_add(&o, &o, &rustsecp256k1_v0_10_0_scalar_one); + CHECK(rustsecp256k1_v0_10_0_scalar_is_zero(&o)); + rustsecp256k1_v0_10_0_scalar_negate(&o, &o); + CHECK(rustsecp256k1_v0_10_0_scalar_is_zero(&o)); + } + + { + /* Test that halving and doubling roundtrips on some fixed values. */ + static const rustsecp256k1_v0_10_0_scalar HALF_TESTS[] = { + /* 0 */ + SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0), + /* 1 */ + SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1), + /* -1 */ + SECP256K1_SCALAR_CONST(0xfffffffful, 0xfffffffful, 0xfffffffful, 0xfffffffeul, 0xbaaedce6ul, 0xaf48a03bul, 0xbfd25e8cul, 0xd0364140ul), + /* -2 (largest odd value) */ + SECP256K1_SCALAR_CONST(0xfffffffful, 0xfffffffful, 0xfffffffful, 0xfffffffeul, 0xbaaedce6ul, 0xaf48a03bul, 0xbfd25e8cul, 0xd036413Ful), + /* Half the secp256k1 order */ + SECP256K1_SCALAR_CONST(0x7ffffffful, 0xfffffffful, 0xfffffffful, 0xfffffffful, 0x5d576e73ul, 0x57a4501dul, 0xdfe92f46ul, 0x681b20a0ul), + /* Half the secp256k1 order + 1 */ + SECP256K1_SCALAR_CONST(0x7ffffffful, 0xfffffffful, 0xfffffffful, 0xfffffffful, 0x5d576e73ul, 0x57a4501dul, 0xdfe92f46ul, 0x681b20a1ul), + /* 2^255 */ + SECP256K1_SCALAR_CONST(0x80000000ul, 0, 0, 0, 0, 0, 0, 0), + /* 2^255 - 1 */ + SECP256K1_SCALAR_CONST(0x7ffffffful, 0xfffffffful, 0xfffffffful, 0xfffffffful, 0xfffffffful, 0xfffffffful, 0xfffffffful, 0xfffffffful), + }; + unsigned n; + for (n = 0; n < sizeof(HALF_TESTS) / sizeof(HALF_TESTS[0]); ++n) { + rustsecp256k1_v0_10_0_scalar s; + rustsecp256k1_v0_10_0_scalar_half(&s, &HALF_TESTS[n]); + rustsecp256k1_v0_10_0_scalar_add(&s, &s, &s); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&s, &HALF_TESTS[n])); + rustsecp256k1_v0_10_0_scalar_add(&s, &s, &s); + rustsecp256k1_v0_10_0_scalar_half(&s, &s); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&s, &HALF_TESTS[n])); + } } { /* Does check_overflow check catch all ones? 
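The halving roundtrips above rely on the rule that, whatever the limb-level implementation, halving modulo the odd group order n must satisfy: half(x) = x >> 1 when x is even, and (x + n) >> 1 when x is odd. In particular half(1) = (n + 1)/2, which is the "half the secp256k1 order + 1" constant in HALF_TESTS. A small-modulus sketch of the same rule (hypothetical helper name, illustration only):

    #include <assert.h>
    #include <stdint.h>
    /* Halve x modulo an odd modulus n; assumes x < n and x + n fits in 64 bits. */
    static uint64_t half_mod(uint64_t x, uint64_t n) {
        return (x & 1) ? (x + n) >> 1 : x >> 1;
    }
    /* e.g. half_mod(1, 13) == 7, and (2 * 7) % 13 == 1. */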
*/ - static const rustsecp256k1_v0_9_2_scalar overflowed = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_10_0_scalar overflowed = SECP256K1_SCALAR_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - CHECK(rustsecp256k1_v0_9_2_scalar_check_overflow(&overflowed)); + CHECK(rustsecp256k1_v0_10_0_scalar_check_overflow(&overflowed)); } { @@ -2386,13 +2377,13 @@ static void run_scalar_tests(void) { * and edge-case coverage on 32-bit and 64-bit implementations. * The responses were generated with Sage 5.9. */ - rustsecp256k1_v0_9_2_scalar x; - rustsecp256k1_v0_9_2_scalar y; - rustsecp256k1_v0_9_2_scalar z; - rustsecp256k1_v0_9_2_scalar zz; - rustsecp256k1_v0_9_2_scalar r1; - rustsecp256k1_v0_9_2_scalar r2; - rustsecp256k1_v0_9_2_scalar zzv; + rustsecp256k1_v0_10_0_scalar x; + rustsecp256k1_v0_10_0_scalar y; + rustsecp256k1_v0_10_0_scalar z; + rustsecp256k1_v0_10_0_scalar zz; + rustsecp256k1_v0_10_0_scalar r1; + rustsecp256k1_v0_10_0_scalar r2; + rustsecp256k1_v0_10_0_scalar zzv; int overflow; unsigned char chal[33][2][32] = { {{0xff, 0xff, 0x03, 0x07, 0x00, 0x00, 0x00, 0x00, @@ -2927,28 +2918,28 @@ static void run_scalar_tests(void) { 0x5c, 0x02, 0x97, 0x1b, 0x62, 0x43, 0x86, 0xf5}} }; for (i = 0; i < 33; i++) { - rustsecp256k1_v0_9_2_scalar_set_b32(&x, chal[i][0], &overflow); + rustsecp256k1_v0_10_0_scalar_set_b32(&x, chal[i][0], &overflow); CHECK(!overflow); - rustsecp256k1_v0_9_2_scalar_set_b32(&y, chal[i][1], &overflow); + rustsecp256k1_v0_10_0_scalar_set_b32(&y, chal[i][1], &overflow); CHECK(!overflow); - rustsecp256k1_v0_9_2_scalar_set_b32(&r1, res[i][0], &overflow); + rustsecp256k1_v0_10_0_scalar_set_b32(&r1, res[i][0], &overflow); CHECK(!overflow); - rustsecp256k1_v0_9_2_scalar_set_b32(&r2, res[i][1], &overflow); + rustsecp256k1_v0_10_0_scalar_set_b32(&r2, res[i][1], &overflow); CHECK(!overflow); - rustsecp256k1_v0_9_2_scalar_mul(&z, &x, &y); - CHECK(!rustsecp256k1_v0_9_2_scalar_check_overflow(&z)); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&r1, &z)); - if (!rustsecp256k1_v0_9_2_scalar_is_zero(&y)) { - rustsecp256k1_v0_9_2_scalar_inverse(&zz, &y); - CHECK(!rustsecp256k1_v0_9_2_scalar_check_overflow(&zz)); - rustsecp256k1_v0_9_2_scalar_inverse_var(&zzv, &y); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&zzv, &zz)); - rustsecp256k1_v0_9_2_scalar_mul(&z, &z, &zz); - CHECK(!rustsecp256k1_v0_9_2_scalar_check_overflow(&z)); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&x, &z)); - rustsecp256k1_v0_9_2_scalar_mul(&zz, &zz, &y); - CHECK(!rustsecp256k1_v0_9_2_scalar_check_overflow(&zz)); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&rustsecp256k1_v0_9_2_scalar_one, &zz)); + rustsecp256k1_v0_10_0_scalar_mul(&z, &x, &y); + CHECK(!rustsecp256k1_v0_10_0_scalar_check_overflow(&z)); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&r1, &z)); + if (!rustsecp256k1_v0_10_0_scalar_is_zero(&y)) { + rustsecp256k1_v0_10_0_scalar_inverse(&zz, &y); + CHECK(!rustsecp256k1_v0_10_0_scalar_check_overflow(&zz)); + rustsecp256k1_v0_10_0_scalar_inverse_var(&zzv, &y); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&zzv, &zz)); + rustsecp256k1_v0_10_0_scalar_mul(&z, &z, &zz); + CHECK(!rustsecp256k1_v0_10_0_scalar_check_overflow(&z)); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&x, &z)); + rustsecp256k1_v0_10_0_scalar_mul(&zz, &zz, &y); + CHECK(!rustsecp256k1_v0_10_0_scalar_check_overflow(&zz)); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&rustsecp256k1_v0_10_0_scalar_one, &zz)); } } } @@ -2956,42 +2947,19 @@ static void run_scalar_tests(void) { /***** FIELD TESTS *****/ -static void 
random_fe(rustsecp256k1_v0_9_2_fe *x) { - unsigned char bin[32]; - do { - rustsecp256k1_v0_9_2_testrand256(bin); - if (rustsecp256k1_v0_9_2_fe_set_b32_limit(x, bin)) { - return; - } - } while(1); -} - -static void random_fe_non_zero(rustsecp256k1_v0_9_2_fe *nz) { - int tries = 10; - while (--tries >= 0) { - random_fe(nz); - rustsecp256k1_v0_9_2_fe_normalize(nz); - if (!rustsecp256k1_v0_9_2_fe_is_zero(nz)) { - break; - } - } - /* Infinitesimal probability of spurious failure here */ - CHECK(tries >= 0); -} - -static void random_fe_non_square(rustsecp256k1_v0_9_2_fe *ns) { - rustsecp256k1_v0_9_2_fe r; +static void random_fe_non_square(rustsecp256k1_v0_10_0_fe *ns) { + rustsecp256k1_v0_10_0_fe r; random_fe_non_zero(ns); - if (rustsecp256k1_v0_9_2_fe_sqrt(&r, ns)) { - rustsecp256k1_v0_9_2_fe_negate(ns, ns, 1); + if (rustsecp256k1_v0_10_0_fe_sqrt(&r, ns)) { + rustsecp256k1_v0_10_0_fe_negate(ns, ns, 1); } } -static int check_fe_equal(const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe *b) { - rustsecp256k1_v0_9_2_fe an = *a; - rustsecp256k1_v0_9_2_fe bn = *b; - rustsecp256k1_v0_9_2_fe_normalize_weak(&an); - return rustsecp256k1_v0_9_2_fe_equal(&an, &bn); +static int check_fe_equal(const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe *b) { + rustsecp256k1_v0_10_0_fe an = *a; + rustsecp256k1_v0_10_0_fe bn = *b; + rustsecp256k1_v0_10_0_fe_normalize_weak(&an); + return rustsecp256k1_v0_10_0_fe_equal(&an, &bn); } static void run_field_convert(void) { @@ -3001,27 +2969,27 @@ static void run_field_convert(void) { 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40 }; - static const rustsecp256k1_v0_9_2_fe_storage fes = SECP256K1_FE_STORAGE_CONST( + static const rustsecp256k1_v0_10_0_fe_storage fes = SECP256K1_FE_STORAGE_CONST( 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL, 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL ); - static const rustsecp256k1_v0_9_2_fe fe = SECP256K1_FE_CONST( + static const rustsecp256k1_v0_10_0_fe fe = SECP256K1_FE_CONST( 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL, 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL ); - rustsecp256k1_v0_9_2_fe fe2; + rustsecp256k1_v0_10_0_fe fe2; unsigned char b322[32]; - rustsecp256k1_v0_9_2_fe_storage fes2; + rustsecp256k1_v0_10_0_fe_storage fes2; /* Check conversions to fe. */ - CHECK(rustsecp256k1_v0_9_2_fe_set_b32_limit(&fe2, b32)); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&fe, &fe2)); - rustsecp256k1_v0_9_2_fe_from_storage(&fe2, &fes); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&fe, &fe2)); + CHECK(rustsecp256k1_v0_10_0_fe_set_b32_limit(&fe2, b32)); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&fe, &fe2)); + rustsecp256k1_v0_10_0_fe_from_storage(&fe2, &fes); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&fe, &fe2)); /* Check conversion from fe. 
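For the be32 overflow tests below: fe_set_b32_mod reduces any 256-bit big-endian input modulo p = 2^256 - 2^32 - 977. The fixed inputs are p itself (reducing to 0), p + 1 (reducing to 1), and the all-0xFF string, i.e. 2^256 - 1, which reduces to (2^256 - 1) - p = 2^32 + 976 = 0x1000003D0, exactly the fe_ff constant checked against.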
*/ - rustsecp256k1_v0_9_2_fe_get_b32(b322, &fe); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(b322, b32, 32) == 0); - rustsecp256k1_v0_9_2_fe_to_storage(&fes2, &fe); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&fes2, &fes, sizeof(fes)) == 0); + rustsecp256k1_v0_10_0_fe_get_b32(b322, &fe); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(b322, b32, 32) == 0); + rustsecp256k1_v0_10_0_fe_to_storage(&fes2, &fe); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&fes2, &fes, sizeof(fes)) == 0); } static void run_field_be32_overflow(void) { @@ -3034,14 +3002,14 @@ static void run_field_be32_overflow(void) { }; static const unsigned char zero[32] = { 0x00 }; unsigned char out[32]; - rustsecp256k1_v0_9_2_fe fe; - CHECK(rustsecp256k1_v0_9_2_fe_set_b32_limit(&fe, zero_overflow) == 0); - rustsecp256k1_v0_9_2_fe_set_b32_mod(&fe, zero_overflow); - CHECK(rustsecp256k1_v0_9_2_fe_normalizes_to_zero(&fe) == 1); - rustsecp256k1_v0_9_2_fe_normalize(&fe); - CHECK(rustsecp256k1_v0_9_2_fe_is_zero(&fe) == 1); - rustsecp256k1_v0_9_2_fe_get_b32(out, &fe); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(out, zero, 32) == 0); + rustsecp256k1_v0_10_0_fe fe; + CHECK(rustsecp256k1_v0_10_0_fe_set_b32_limit(&fe, zero_overflow) == 0); + rustsecp256k1_v0_10_0_fe_set_b32_mod(&fe, zero_overflow); + CHECK(rustsecp256k1_v0_10_0_fe_normalizes_to_zero(&fe) == 1); + rustsecp256k1_v0_10_0_fe_normalize(&fe); + CHECK(rustsecp256k1_v0_10_0_fe_is_zero(&fe) == 1); + rustsecp256k1_v0_10_0_fe_get_b32(out, &fe); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(out, zero, 32) == 0); } { static const unsigned char one_overflow[32] = { @@ -3057,13 +3025,13 @@ static void run_field_be32_overflow(void) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, }; unsigned char out[32]; - rustsecp256k1_v0_9_2_fe fe; - CHECK(rustsecp256k1_v0_9_2_fe_set_b32_limit(&fe, one_overflow) == 0); - rustsecp256k1_v0_9_2_fe_set_b32_mod(&fe, one_overflow); - rustsecp256k1_v0_9_2_fe_normalize(&fe); - CHECK(rustsecp256k1_v0_9_2_fe_cmp_var(&fe, &rustsecp256k1_v0_9_2_fe_one) == 0); - rustsecp256k1_v0_9_2_fe_get_b32(out, &fe); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(out, one, 32) == 0); + rustsecp256k1_v0_10_0_fe fe; + CHECK(rustsecp256k1_v0_10_0_fe_set_b32_limit(&fe, one_overflow) == 0); + rustsecp256k1_v0_10_0_fe_set_b32_mod(&fe, one_overflow); + rustsecp256k1_v0_10_0_fe_normalize(&fe); + CHECK(rustsecp256k1_v0_10_0_fe_cmp_var(&fe, &rustsecp256k1_v0_10_0_fe_one) == 0); + rustsecp256k1_v0_10_0_fe_get_b32(out, &fe); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(out, one, 32) == 0); } { static const unsigned char ff_overflow[32] = { @@ -3079,116 +3047,116 @@ static void run_field_be32_overflow(void) { 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x03, 0xD0, }; unsigned char out[32]; - rustsecp256k1_v0_9_2_fe fe; - const rustsecp256k1_v0_9_2_fe fe_ff = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0x01, 0x000003d0); - CHECK(rustsecp256k1_v0_9_2_fe_set_b32_limit(&fe, ff_overflow) == 0); - rustsecp256k1_v0_9_2_fe_set_b32_mod(&fe, ff_overflow); - rustsecp256k1_v0_9_2_fe_normalize(&fe); - CHECK(rustsecp256k1_v0_9_2_fe_cmp_var(&fe, &fe_ff) == 0); - rustsecp256k1_v0_9_2_fe_get_b32(out, &fe); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(out, ff, 32) == 0); + rustsecp256k1_v0_10_0_fe fe; + const rustsecp256k1_v0_10_0_fe fe_ff = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0x01, 0x000003d0); + CHECK(rustsecp256k1_v0_10_0_fe_set_b32_limit(&fe, ff_overflow) == 0); + rustsecp256k1_v0_10_0_fe_set_b32_mod(&fe, ff_overflow); + rustsecp256k1_v0_10_0_fe_normalize(&fe); + CHECK(rustsecp256k1_v0_10_0_fe_cmp_var(&fe, &fe_ff) == 0); + 
rustsecp256k1_v0_10_0_fe_get_b32(out, &fe); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(out, ff, 32) == 0); } } /* Returns true if two field elements have the same representation. */ -static int fe_identical(const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe *b) { +static int fe_identical(const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe *b) { int ret = 1; /* Compare the struct member that holds the limbs. */ - ret &= (rustsecp256k1_v0_9_2_memcmp_var(a->n, b->n, sizeof(a->n)) == 0); + ret &= (rustsecp256k1_v0_10_0_memcmp_var(a->n, b->n, sizeof(a->n)) == 0); return ret; } static void run_field_half(void) { - rustsecp256k1_v0_9_2_fe t, u; + rustsecp256k1_v0_10_0_fe t, u; int m; /* Check magnitude 0 input */ - rustsecp256k1_v0_9_2_fe_get_bounds(&t, 0); - rustsecp256k1_v0_9_2_fe_half(&t); + rustsecp256k1_v0_10_0_fe_get_bounds(&t, 0); + rustsecp256k1_v0_10_0_fe_half(&t); #ifdef VERIFY CHECK(t.magnitude == 1); CHECK(t.normalized == 0); #endif - CHECK(rustsecp256k1_v0_9_2_fe_normalizes_to_zero(&t)); + CHECK(rustsecp256k1_v0_10_0_fe_normalizes_to_zero(&t)); /* Check non-zero magnitudes in the supported range */ for (m = 1; m < 32; m++) { /* Check max-value input */ - rustsecp256k1_v0_9_2_fe_get_bounds(&t, m); + rustsecp256k1_v0_10_0_fe_get_bounds(&t, m); u = t; - rustsecp256k1_v0_9_2_fe_half(&u); + rustsecp256k1_v0_10_0_fe_half(&u); #ifdef VERIFY CHECK(u.magnitude == (m >> 1) + 1); CHECK(u.normalized == 0); #endif - rustsecp256k1_v0_9_2_fe_normalize_weak(&u); - rustsecp256k1_v0_9_2_fe_add(&u, &u); + rustsecp256k1_v0_10_0_fe_normalize_weak(&u); + rustsecp256k1_v0_10_0_fe_add(&u, &u); CHECK(check_fe_equal(&t, &u)); /* Check worst-case input: ensure the LSB is 1 so that P will be added, * which will also cause all carries to be 1, since all limbs that can * generate a carry are initially even and all limbs of P are odd in * every existing field implementation. */ - rustsecp256k1_v0_9_2_fe_get_bounds(&t, m); + rustsecp256k1_v0_10_0_fe_get_bounds(&t, m); CHECK(t.n[0] > 0); CHECK((t.n[0] & 1) == 0); --t.n[0]; u = t; - rustsecp256k1_v0_9_2_fe_half(&u); + rustsecp256k1_v0_10_0_fe_half(&u); #ifdef VERIFY CHECK(u.magnitude == (m >> 1) + 1); CHECK(u.normalized == 0); #endif - rustsecp256k1_v0_9_2_fe_normalize_weak(&u); - rustsecp256k1_v0_9_2_fe_add(&u, &u); + rustsecp256k1_v0_10_0_fe_normalize_weak(&u); + rustsecp256k1_v0_10_0_fe_add(&u, &u); CHECK(check_fe_equal(&t, &u)); } } static void run_field_misc(void) { - rustsecp256k1_v0_9_2_fe x; - rustsecp256k1_v0_9_2_fe y; - rustsecp256k1_v0_9_2_fe z; - rustsecp256k1_v0_9_2_fe q; + rustsecp256k1_v0_10_0_fe x; + rustsecp256k1_v0_10_0_fe y; + rustsecp256k1_v0_10_0_fe z; + rustsecp256k1_v0_10_0_fe q; int v; - rustsecp256k1_v0_9_2_fe fe5 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 5); + rustsecp256k1_v0_10_0_fe fe5 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 5); int i, j; for (i = 0; i < 1000 * COUNT; i++) { - rustsecp256k1_v0_9_2_fe_storage xs, ys, zs; + rustsecp256k1_v0_10_0_fe_storage xs, ys, zs; if (i & 1) { random_fe(&x); } else { random_fe_test(&x); } random_fe_non_zero(&y); - v = rustsecp256k1_v0_9_2_testrand_bits(15); + v = rustsecp256k1_v0_10_0_testrand_bits(15); /* Test that fe_add_int is equivalent to fe_set_int + fe_add. 
*/ - rustsecp256k1_v0_9_2_fe_set_int(&q, v); /* q = v */ + rustsecp256k1_v0_10_0_fe_set_int(&q, v); /* q = v */ z = x; /* z = x */ - rustsecp256k1_v0_9_2_fe_add(&z, &q); /* z = x+v */ + rustsecp256k1_v0_10_0_fe_add(&z, &q); /* z = x+v */ q = x; /* q = x */ - rustsecp256k1_v0_9_2_fe_add_int(&q, v); /* q = x+v */ + rustsecp256k1_v0_10_0_fe_add_int(&q, v); /* q = x+v */ CHECK(check_fe_equal(&q, &z)); /* Test the fe equality and comparison operations. */ - CHECK(rustsecp256k1_v0_9_2_fe_cmp_var(&x, &x) == 0); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&x, &x)); + CHECK(rustsecp256k1_v0_10_0_fe_cmp_var(&x, &x) == 0); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&x, &x)); z = x; - rustsecp256k1_v0_9_2_fe_add(&z,&y); + rustsecp256k1_v0_10_0_fe_add(&z,&y); /* Test fe conditional move; z is not normalized here. */ q = x; - rustsecp256k1_v0_9_2_fe_cmov(&x, &z, 0); + rustsecp256k1_v0_10_0_fe_cmov(&x, &z, 0); #ifdef VERIFY CHECK(!x.normalized); CHECK((x.magnitude == q.magnitude) || (x.magnitude == z.magnitude)); CHECK((x.magnitude >= q.magnitude) && (x.magnitude >= z.magnitude)); #endif x = q; - rustsecp256k1_v0_9_2_fe_cmov(&x, &x, 1); + rustsecp256k1_v0_10_0_fe_cmov(&x, &x, 1); CHECK(!fe_identical(&x, &z)); CHECK(fe_identical(&x, &q)); - rustsecp256k1_v0_9_2_fe_cmov(&q, &z, 1); + rustsecp256k1_v0_10_0_fe_cmov(&q, &z, 1); #ifdef VERIFY CHECK(!q.normalized); CHECK((q.magnitude == x.magnitude) || (q.magnitude == z.magnitude)); @@ -3196,67 +3164,67 @@ static void run_field_misc(void) { #endif CHECK(fe_identical(&q, &z)); q = z; - rustsecp256k1_v0_9_2_fe_normalize_var(&x); - rustsecp256k1_v0_9_2_fe_normalize_var(&z); - CHECK(!rustsecp256k1_v0_9_2_fe_equal(&x, &z)); - rustsecp256k1_v0_9_2_fe_normalize_var(&q); - rustsecp256k1_v0_9_2_fe_cmov(&q, &z, (i&1)); + rustsecp256k1_v0_10_0_fe_normalize_var(&x); + rustsecp256k1_v0_10_0_fe_normalize_var(&z); + CHECK(!rustsecp256k1_v0_10_0_fe_equal(&x, &z)); + rustsecp256k1_v0_10_0_fe_normalize_var(&q); + rustsecp256k1_v0_10_0_fe_cmov(&q, &z, (i&1)); #ifdef VERIFY CHECK(q.normalized && q.magnitude == 1); #endif for (j = 0; j < 6; j++) { - rustsecp256k1_v0_9_2_fe_negate_unchecked(&z, &z, j+1); - rustsecp256k1_v0_9_2_fe_normalize_var(&q); - rustsecp256k1_v0_9_2_fe_cmov(&q, &z, (j&1)); + rustsecp256k1_v0_10_0_fe_negate_unchecked(&z, &z, j+1); + rustsecp256k1_v0_10_0_fe_normalize_var(&q); + rustsecp256k1_v0_10_0_fe_cmov(&q, &z, (j&1)); #ifdef VERIFY CHECK(!q.normalized && q.magnitude == z.magnitude); #endif } - rustsecp256k1_v0_9_2_fe_normalize_var(&z); + rustsecp256k1_v0_10_0_fe_normalize_var(&z); /* Test storage conversion and conditional moves. 
*/ - rustsecp256k1_v0_9_2_fe_to_storage(&xs, &x); - rustsecp256k1_v0_9_2_fe_to_storage(&ys, &y); - rustsecp256k1_v0_9_2_fe_to_storage(&zs, &z); - rustsecp256k1_v0_9_2_fe_storage_cmov(&zs, &xs, 0); - rustsecp256k1_v0_9_2_fe_storage_cmov(&zs, &zs, 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&xs, &zs, sizeof(xs)) != 0); - rustsecp256k1_v0_9_2_fe_storage_cmov(&ys, &xs, 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&xs, &ys, sizeof(xs)) == 0); - rustsecp256k1_v0_9_2_fe_from_storage(&x, &xs); - rustsecp256k1_v0_9_2_fe_from_storage(&y, &ys); - rustsecp256k1_v0_9_2_fe_from_storage(&z, &zs); + rustsecp256k1_v0_10_0_fe_to_storage(&xs, &x); + rustsecp256k1_v0_10_0_fe_to_storage(&ys, &y); + rustsecp256k1_v0_10_0_fe_to_storage(&zs, &z); + rustsecp256k1_v0_10_0_fe_storage_cmov(&zs, &xs, 0); + rustsecp256k1_v0_10_0_fe_storage_cmov(&zs, &zs, 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&xs, &zs, sizeof(xs)) != 0); + rustsecp256k1_v0_10_0_fe_storage_cmov(&ys, &xs, 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&xs, &ys, sizeof(xs)) == 0); + rustsecp256k1_v0_10_0_fe_from_storage(&x, &xs); + rustsecp256k1_v0_10_0_fe_from_storage(&y, &ys); + rustsecp256k1_v0_10_0_fe_from_storage(&z, &zs); /* Test that mul_int, mul, and add agree. */ - rustsecp256k1_v0_9_2_fe_add(&y, &x); - rustsecp256k1_v0_9_2_fe_add(&y, &x); + rustsecp256k1_v0_10_0_fe_add(&y, &x); + rustsecp256k1_v0_10_0_fe_add(&y, &x); z = x; - rustsecp256k1_v0_9_2_fe_mul_int(&z, 3); + rustsecp256k1_v0_10_0_fe_mul_int(&z, 3); CHECK(check_fe_equal(&y, &z)); - rustsecp256k1_v0_9_2_fe_add(&y, &x); - rustsecp256k1_v0_9_2_fe_add(&z, &x); + rustsecp256k1_v0_10_0_fe_add(&y, &x); + rustsecp256k1_v0_10_0_fe_add(&z, &x); CHECK(check_fe_equal(&z, &y)); z = x; - rustsecp256k1_v0_9_2_fe_mul_int(&z, 5); - rustsecp256k1_v0_9_2_fe_mul(&q, &x, &fe5); + rustsecp256k1_v0_10_0_fe_mul_int(&z, 5); + rustsecp256k1_v0_10_0_fe_mul(&q, &x, &fe5); CHECK(check_fe_equal(&z, &q)); - rustsecp256k1_v0_9_2_fe_negate(&x, &x, 1); - rustsecp256k1_v0_9_2_fe_add(&z, &x); - rustsecp256k1_v0_9_2_fe_add(&q, &x); + rustsecp256k1_v0_10_0_fe_negate(&x, &x, 1); + rustsecp256k1_v0_10_0_fe_add(&z, &x); + rustsecp256k1_v0_10_0_fe_add(&q, &x); CHECK(check_fe_equal(&y, &z)); CHECK(check_fe_equal(&q, &y)); - /* Check rustsecp256k1_v0_9_2_fe_half. */ + /* Check rustsecp256k1_v0_10_0_fe_half. */ z = x; - rustsecp256k1_v0_9_2_fe_half(&z); - rustsecp256k1_v0_9_2_fe_add(&z, &z); + rustsecp256k1_v0_10_0_fe_half(&z); + rustsecp256k1_v0_10_0_fe_add(&z, &z); CHECK(check_fe_equal(&x, &z)); - rustsecp256k1_v0_9_2_fe_add(&z, &z); - rustsecp256k1_v0_9_2_fe_half(&z); + rustsecp256k1_v0_10_0_fe_add(&z, &z); + rustsecp256k1_v0_10_0_fe_half(&z); CHECK(check_fe_equal(&x, &z)); } } -static void test_fe_mul(const rustsecp256k1_v0_9_2_fe* a, const rustsecp256k1_v0_9_2_fe* b, int use_sqr) +static void test_fe_mul(const rustsecp256k1_v0_10_0_fe* a, const rustsecp256k1_v0_10_0_fe* b, int use_sqr) { - rustsecp256k1_v0_9_2_fe c, an, bn; + rustsecp256k1_v0_10_0_fe c, an, bn; /* Variables in BE 32-byte format. */ unsigned char a32[32], b32[32], c32[32]; /* Variables in LE 16x uint16_t format. */ @@ -3272,20 +3240,20 @@ static void test_fe_mul(const rustsecp256k1_v0_9_2_fe* a, const rustsecp256k1_v0 /* Compute C = A * B in fe format. */ c = *a; if (use_sqr) { - rustsecp256k1_v0_9_2_fe_sqr(&c, &c); + rustsecp256k1_v0_10_0_fe_sqr(&c, &c); } else { - rustsecp256k1_v0_9_2_fe_mul(&c, &c, b); + rustsecp256k1_v0_10_0_fe_mul(&c, &c, b); } /* Convert A, B, C into LE 16x uint16_t format. 
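This conversion sets up an independent cross-check: test_fe_mul compares fe_mul/fe_sqr against mulmod256, the generic schoolbook multiply-mod routine over 16-bit limbs already used by the modinv tests. Checking the optimized 10x26/5x52 field code against a representation-agnostic reference is what catches limb-packing and carry bugs.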
*/ an = *a; bn = *b; - rustsecp256k1_v0_9_2_fe_normalize_var(&c); - rustsecp256k1_v0_9_2_fe_normalize_var(&an); - rustsecp256k1_v0_9_2_fe_normalize_var(&bn); - rustsecp256k1_v0_9_2_fe_get_b32(a32, &an); - rustsecp256k1_v0_9_2_fe_get_b32(b32, &bn); - rustsecp256k1_v0_9_2_fe_get_b32(c32, &c); + rustsecp256k1_v0_10_0_fe_normalize_var(&c); + rustsecp256k1_v0_10_0_fe_normalize_var(&an); + rustsecp256k1_v0_10_0_fe_normalize_var(&bn); + rustsecp256k1_v0_10_0_fe_get_b32(a32, &an); + rustsecp256k1_v0_10_0_fe_get_b32(b32, &bn); + rustsecp256k1_v0_10_0_fe_get_b32(c32, &c); for (i = 0; i < 16; ++i) { a16[i] = a32[31 - 2*i] + ((uint16_t)a32[30 - 2*i] << 8); b16[i] = b32[31 - 2*i] + ((uint16_t)b32[30 - 2*i] << 8); @@ -3294,13 +3262,13 @@ static void test_fe_mul(const rustsecp256k1_v0_9_2_fe* a, const rustsecp256k1_v0 /* Compute T = A * B in LE 16x uint16_t format. */ mulmod256(t16, a16, b16, m16); /* Compare */ - CHECK(rustsecp256k1_v0_9_2_memcmp_var(t16, c16, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(t16, c16, 32) == 0); } static void run_fe_mul(void) { int i; for (i = 0; i < 100 * COUNT; ++i) { - rustsecp256k1_v0_9_2_fe a, b, c, d; + rustsecp256k1_v0_10_0_fe a, b, c, d; random_fe(&a); random_fe_magnitude(&a); random_fe(&b); @@ -3319,50 +3287,50 @@ static void run_fe_mul(void) { } static void run_sqr(void) { - rustsecp256k1_v0_9_2_fe x, s; + rustsecp256k1_v0_10_0_fe x, s; { int i; - rustsecp256k1_v0_9_2_fe_set_int(&x, 1); - rustsecp256k1_v0_9_2_fe_negate(&x, &x, 1); + rustsecp256k1_v0_10_0_fe_set_int(&x, 1); + rustsecp256k1_v0_10_0_fe_negate(&x, &x, 1); for (i = 1; i <= 512; ++i) { - rustsecp256k1_v0_9_2_fe_mul_int(&x, 2); - rustsecp256k1_v0_9_2_fe_normalize(&x); - rustsecp256k1_v0_9_2_fe_sqr(&s, &x); + rustsecp256k1_v0_10_0_fe_mul_int(&x, 2); + rustsecp256k1_v0_10_0_fe_normalize(&x); + rustsecp256k1_v0_10_0_fe_sqr(&s, &x); } } } -static void test_sqrt(const rustsecp256k1_v0_9_2_fe *a, const rustsecp256k1_v0_9_2_fe *k) { - rustsecp256k1_v0_9_2_fe r1, r2; - int v = rustsecp256k1_v0_9_2_fe_sqrt(&r1, a); +static void test_sqrt(const rustsecp256k1_v0_10_0_fe *a, const rustsecp256k1_v0_10_0_fe *k) { + rustsecp256k1_v0_10_0_fe r1, r2; + int v = rustsecp256k1_v0_10_0_fe_sqrt(&r1, a); CHECK((v == 0) == (k == NULL)); if (k != NULL) { /* Check that the returned root is +/- the given known answer */ - rustsecp256k1_v0_9_2_fe_negate(&r2, &r1, 1); - rustsecp256k1_v0_9_2_fe_add(&r1, k); rustsecp256k1_v0_9_2_fe_add(&r2, k); - rustsecp256k1_v0_9_2_fe_normalize(&r1); rustsecp256k1_v0_9_2_fe_normalize(&r2); - CHECK(rustsecp256k1_v0_9_2_fe_is_zero(&r1) || rustsecp256k1_v0_9_2_fe_is_zero(&r2)); + rustsecp256k1_v0_10_0_fe_negate(&r2, &r1, 1); + rustsecp256k1_v0_10_0_fe_add(&r1, k); rustsecp256k1_v0_10_0_fe_add(&r2, k); + rustsecp256k1_v0_10_0_fe_normalize(&r1); rustsecp256k1_v0_10_0_fe_normalize(&r2); + CHECK(rustsecp256k1_v0_10_0_fe_is_zero(&r1) || rustsecp256k1_v0_10_0_fe_is_zero(&r2)); } } static void run_sqrt(void) { - rustsecp256k1_v0_9_2_fe ns, x, s, t; + rustsecp256k1_v0_10_0_fe ns, x, s, t; int i; /* Check sqrt(0) is 0 */ - rustsecp256k1_v0_9_2_fe_set_int(&x, 0); - rustsecp256k1_v0_9_2_fe_sqr(&s, &x); + rustsecp256k1_v0_10_0_fe_set_int(&x, 0); + rustsecp256k1_v0_10_0_fe_sqr(&s, &x); test_sqrt(&s, &x); /* Check sqrt of small squares (and their negatives) */ for (i = 1; i <= 100; i++) { - rustsecp256k1_v0_9_2_fe_set_int(&x, i); - rustsecp256k1_v0_9_2_fe_sqr(&s, &x); + rustsecp256k1_v0_10_0_fe_set_int(&x, i); + rustsecp256k1_v0_10_0_fe_sqr(&s, &x); test_sqrt(&s, &x); - rustsecp256k1_v0_9_2_fe_negate(&t, &s, 
1); + rustsecp256k1_v0_10_0_fe_negate(&t, &s, 1); test_sqrt(&t, NULL); } @@ -3372,13 +3340,13 @@ static void run_sqrt(void) { random_fe_non_square(&ns); for (j = 0; j < COUNT; j++) { random_fe(&x); - rustsecp256k1_v0_9_2_fe_sqr(&s, &x); - CHECK(rustsecp256k1_v0_9_2_fe_is_square_var(&s)); + rustsecp256k1_v0_10_0_fe_sqr(&s, &x); + CHECK(rustsecp256k1_v0_10_0_fe_is_square_var(&s)); test_sqrt(&s, &x); - rustsecp256k1_v0_9_2_fe_negate(&t, &s, 1); - CHECK(!rustsecp256k1_v0_9_2_fe_is_square_var(&t)); + rustsecp256k1_v0_10_0_fe_negate(&t, &s, 1); + CHECK(!rustsecp256k1_v0_10_0_fe_is_square_var(&t)); test_sqrt(&t, NULL); - rustsecp256k1_v0_9_2_fe_mul(&t, &s, &ns); + rustsecp256k1_v0_10_0_fe_mul(&t, &s, &ns); test_sqrt(&t, NULL); } } @@ -3386,12 +3354,12 @@ static void run_sqrt(void) { /***** FIELD/SCALAR INVERSE TESTS *****/ -static const rustsecp256k1_v0_9_2_scalar scalar_minus_one = SECP256K1_SCALAR_CONST( +static const rustsecp256k1_v0_10_0_scalar scalar_minus_one = SECP256K1_SCALAR_CONST( 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0xBAAEDCE6, 0xAF48A03B, 0xBFD25E8C, 0xD0364140 ); -static const rustsecp256k1_v0_9_2_fe fe_minus_one = SECP256K1_FE_CONST( +static const rustsecp256k1_v0_10_0_fe fe_minus_one = SECP256K1_FE_CONST( 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFC2E ); @@ -3403,57 +3371,57 @@ static const rustsecp256k1_v0_9_2_fe fe_minus_one = SECP256K1_FE_CONST( * for x!=0 and x!=1: 1/(1/x - 1) + 1 == -1/(x-1) */ -static void test_inverse_scalar(rustsecp256k1_v0_9_2_scalar* out, const rustsecp256k1_v0_9_2_scalar* x, int var) +static void test_inverse_scalar(rustsecp256k1_v0_10_0_scalar* out, const rustsecp256k1_v0_10_0_scalar* x, int var) { - rustsecp256k1_v0_9_2_scalar l, r, t; + rustsecp256k1_v0_10_0_scalar l, r, t; - (var ? rustsecp256k1_v0_9_2_scalar_inverse_var : rustsecp256k1_v0_9_2_scalar_inverse)(&l, x); /* l = 1/x */ + (var ? rustsecp256k1_v0_10_0_scalar_inverse_var : rustsecp256k1_v0_10_0_scalar_inverse)(&l, x); /* l = 1/x */ if (out) *out = l; - if (rustsecp256k1_v0_9_2_scalar_is_zero(x)) { - CHECK(rustsecp256k1_v0_9_2_scalar_is_zero(&l)); + if (rustsecp256k1_v0_10_0_scalar_is_zero(x)) { + CHECK(rustsecp256k1_v0_10_0_scalar_is_zero(&l)); return; } - rustsecp256k1_v0_9_2_scalar_mul(&t, x, &l); /* t = x*(1/x) */ - CHECK(rustsecp256k1_v0_9_2_scalar_is_one(&t)); /* x*(1/x) == 1 */ - rustsecp256k1_v0_9_2_scalar_add(&r, x, &scalar_minus_one); /* r = x-1 */ - if (rustsecp256k1_v0_9_2_scalar_is_zero(&r)) return; - (var ? rustsecp256k1_v0_9_2_scalar_inverse_var : rustsecp256k1_v0_9_2_scalar_inverse)(&r, &r); /* r = 1/(x-1) */ - rustsecp256k1_v0_9_2_scalar_add(&l, &scalar_minus_one, &l); /* l = 1/x-1 */ - (var ? rustsecp256k1_v0_9_2_scalar_inverse_var : rustsecp256k1_v0_9_2_scalar_inverse)(&l, &l); /* l = 1/(1/x-1) */ - rustsecp256k1_v0_9_2_scalar_add(&l, &l, &rustsecp256k1_v0_9_2_scalar_one); /* l = 1/(1/x-1)+1 */ - rustsecp256k1_v0_9_2_scalar_add(&l, &r, &l); /* l = 1/(1/x-1)+1 + 1/(x-1) */ - CHECK(rustsecp256k1_v0_9_2_scalar_is_zero(&l)); /* l == 0 */ + rustsecp256k1_v0_10_0_scalar_mul(&t, x, &l); /* t = x*(1/x) */ + CHECK(rustsecp256k1_v0_10_0_scalar_is_one(&t)); /* x*(1/x) == 1 */ + rustsecp256k1_v0_10_0_scalar_add(&r, x, &scalar_minus_one); /* r = x-1 */ + if (rustsecp256k1_v0_10_0_scalar_is_zero(&r)) return; + (var ? rustsecp256k1_v0_10_0_scalar_inverse_var : rustsecp256k1_v0_10_0_scalar_inverse)(&r, &r); /* r = 1/(x-1) */ + rustsecp256k1_v0_10_0_scalar_add(&l, &scalar_minus_one, &l); /* l = 1/x-1 */ + (var ? 
rustsecp256k1_v0_10_0_scalar_inverse_var : rustsecp256k1_v0_10_0_scalar_inverse)(&l, &l); /* l = 1/(1/x-1) */ + rustsecp256k1_v0_10_0_scalar_add(&l, &l, &rustsecp256k1_v0_10_0_scalar_one); /* l = 1/(1/x-1)+1 */ + rustsecp256k1_v0_10_0_scalar_add(&l, &r, &l); /* l = 1/(1/x-1)+1 + 1/(x-1) */ + CHECK(rustsecp256k1_v0_10_0_scalar_is_zero(&l)); /* l == 0 */ } -static void test_inverse_field(rustsecp256k1_v0_9_2_fe* out, const rustsecp256k1_v0_9_2_fe* x, int var) +static void test_inverse_field(rustsecp256k1_v0_10_0_fe* out, const rustsecp256k1_v0_10_0_fe* x, int var) { - rustsecp256k1_v0_9_2_fe l, r, t; + rustsecp256k1_v0_10_0_fe l, r, t; - (var ? rustsecp256k1_v0_9_2_fe_inv_var : rustsecp256k1_v0_9_2_fe_inv)(&l, x) ; /* l = 1/x */ + (var ? rustsecp256k1_v0_10_0_fe_inv_var : rustsecp256k1_v0_10_0_fe_inv)(&l, x) ; /* l = 1/x */ if (out) *out = l; t = *x; /* t = x */ - if (rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&t)) { - CHECK(rustsecp256k1_v0_9_2_fe_normalizes_to_zero(&l)); + if (rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&t)) { + CHECK(rustsecp256k1_v0_10_0_fe_normalizes_to_zero(&l)); return; } - rustsecp256k1_v0_9_2_fe_mul(&t, x, &l); /* t = x*(1/x) */ - rustsecp256k1_v0_9_2_fe_add(&t, &fe_minus_one); /* t = x*(1/x)-1 */ - CHECK(rustsecp256k1_v0_9_2_fe_normalizes_to_zero(&t)); /* x*(1/x)-1 == 0 */ + rustsecp256k1_v0_10_0_fe_mul(&t, x, &l); /* t = x*(1/x) */ + rustsecp256k1_v0_10_0_fe_add(&t, &fe_minus_one); /* t = x*(1/x)-1 */ + CHECK(rustsecp256k1_v0_10_0_fe_normalizes_to_zero(&t)); /* x*(1/x)-1 == 0 */ r = *x; /* r = x */ - rustsecp256k1_v0_9_2_fe_add(&r, &fe_minus_one); /* r = x-1 */ - if (rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&r)) return; - (var ? rustsecp256k1_v0_9_2_fe_inv_var : rustsecp256k1_v0_9_2_fe_inv)(&r, &r); /* r = 1/(x-1) */ - rustsecp256k1_v0_9_2_fe_add(&l, &fe_minus_one); /* l = 1/x-1 */ - (var ? rustsecp256k1_v0_9_2_fe_inv_var : rustsecp256k1_v0_9_2_fe_inv)(&l, &l); /* l = 1/(1/x-1) */ - rustsecp256k1_v0_9_2_fe_add_int(&l, 1); /* l = 1/(1/x-1)+1 */ - rustsecp256k1_v0_9_2_fe_add(&l, &r); /* l = 1/(1/x-1)+1 + 1/(x-1) */ - CHECK(rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&l)); /* l == 0 */ + rustsecp256k1_v0_10_0_fe_add(&r, &fe_minus_one); /* r = x-1 */ + if (rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&r)) return; + (var ? rustsecp256k1_v0_10_0_fe_inv_var : rustsecp256k1_v0_10_0_fe_inv)(&r, &r); /* r = 1/(x-1) */ + rustsecp256k1_v0_10_0_fe_add(&l, &fe_minus_one); /* l = 1/x-1 */ + (var ? rustsecp256k1_v0_10_0_fe_inv_var : rustsecp256k1_v0_10_0_fe_inv)(&l, &l); /* l = 1/(1/x-1) */ + rustsecp256k1_v0_10_0_fe_add_int(&l, 1); /* l = 1/(1/x-1)+1 */ + rustsecp256k1_v0_10_0_fe_add(&l, &r); /* l = 1/(1/x-1)+1 + 1/(x-1) */ + CHECK(rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&l)); /* l == 0 */ } static void run_inverse_tests(void) { /* Fixed test cases for field inverses: pairs of (x, 1/x) mod p. */ - static const rustsecp256k1_v0_9_2_fe fe_cases[][2] = { + static const rustsecp256k1_v0_10_0_fe fe_cases[][2] = { /* 0 */ {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0)}, @@ -3558,7 +3526,7 @@ static void run_inverse_tests(void) SECP256K1_FE_CONST(0x9a94b9b5, 0x57eb71ee, 0x4c975b8b, 0xac5262a8, 0x077b0595, 0xe12a6b1f, 0xd728edef, 0x1a6bf956)} }; /* Fixed test cases for scalar inverses: pairs of (x, 1/x) mod n. 
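The identity driving test_inverse_scalar and test_inverse_field holds over any field: 1/(1/x - 1) = x/(1 - x), so 1/(1/x - 1) + 1 = x/(1 - x) + (1 - x)/(1 - x) = 1/(1 - x) = -1/(x - 1). Both helpers therefore form l = 1/(1/x - 1) + 1 and r = 1/(x - 1), add them, and check that the sum is zero.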
*/ - static const rustsecp256k1_v0_9_2_scalar scalar_cases[][2] = { + static const rustsecp256k1_v0_10_0_scalar scalar_cases[][2] = { /* 0 */ {SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0)}, @@ -3645,8 +3613,8 @@ static void run_inverse_tests(void) }; int i, var, testrand; unsigned char b32[32]; - rustsecp256k1_v0_9_2_fe x_fe; - rustsecp256k1_v0_9_2_scalar x_scalar; + rustsecp256k1_v0_10_0_fe x_fe; + rustsecp256k1_v0_10_0_scalar x_scalar; memset(b32, 0, sizeof(b32)); /* Test fixed test cases through test_inverse_{scalar,field}, both ways. */ for (i = 0; (size_t)i < sizeof(fe_cases)/sizeof(fe_cases[0]); ++i) { @@ -3660,23 +3628,23 @@ static void run_inverse_tests(void) for (i = 0; (size_t)i < sizeof(scalar_cases)/sizeof(scalar_cases[0]); ++i) { for (var = 0; var <= 1; ++var) { test_inverse_scalar(&x_scalar, &scalar_cases[i][0], var); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&x_scalar, &scalar_cases[i][1])); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&x_scalar, &scalar_cases[i][1])); test_inverse_scalar(&x_scalar, &scalar_cases[i][1], var); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&x_scalar, &scalar_cases[i][0])); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&x_scalar, &scalar_cases[i][0])); } } /* Test inputs 0..999 and their respective negations. */ for (i = 0; i < 1000; ++i) { b32[31] = i & 0xff; b32[30] = (i >> 8) & 0xff; - rustsecp256k1_v0_9_2_scalar_set_b32(&x_scalar, b32, NULL); - rustsecp256k1_v0_9_2_fe_set_b32_mod(&x_fe, b32); + rustsecp256k1_v0_10_0_scalar_set_b32(&x_scalar, b32, NULL); + rustsecp256k1_v0_10_0_fe_set_b32_mod(&x_fe, b32); for (var = 0; var <= 1; ++var) { test_inverse_scalar(NULL, &x_scalar, var); test_inverse_field(NULL, &x_fe, var); } - rustsecp256k1_v0_9_2_scalar_negate(&x_scalar, &x_scalar); - rustsecp256k1_v0_9_2_fe_negate(&x_fe, &x_fe, 1); + rustsecp256k1_v0_10_0_scalar_negate(&x_scalar, &x_scalar); + rustsecp256k1_v0_10_0_fe_negate(&x_fe, &x_fe, 1); for (var = 0; var <= 1; ++var) { test_inverse_scalar(NULL, &x_scalar, var); test_inverse_field(NULL, &x_fe, var); @@ -3685,9 +3653,9 @@ static void run_inverse_tests(void) /* test 128*count random inputs; half with testrand256_test, half with testrand256 */ for (testrand = 0; testrand <= 1; ++testrand) { for (i = 0; i < 64 * COUNT; ++i) { - (testrand ? rustsecp256k1_v0_9_2_testrand256_test : rustsecp256k1_v0_9_2_testrand256)(b32); - rustsecp256k1_v0_9_2_scalar_set_b32(&x_scalar, b32, NULL); - rustsecp256k1_v0_9_2_fe_set_b32_mod(&x_fe, b32); + (testrand ? rustsecp256k1_v0_10_0_testrand256_test : rustsecp256k1_v0_10_0_testrand256)(b32); + rustsecp256k1_v0_10_0_scalar_set_b32(&x_scalar, b32, NULL); + rustsecp256k1_v0_10_0_fe_set_b32_mod(&x_fe, b32); for (var = 0; var <= 1; ++var) { test_inverse_scalar(NULL, &x_scalar, var); test_inverse_field(NULL, &x_fe, var); @@ -3698,54 +3666,28 @@ static void run_inverse_tests(void) /***** GROUP TESTS *****/ -static void ge_equals_ge(const rustsecp256k1_v0_9_2_ge *a, const rustsecp256k1_v0_9_2_ge *b) { - CHECK(a->infinity == b->infinity); - if (a->infinity) { - return; - } - CHECK(rustsecp256k1_v0_9_2_fe_equal(&a->x, &b->x)); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&a->y, &b->y)); -} - /* This compares jacobian points including their Z, not just their geometric meaning. 
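A Jacobian triple (X, Y, Z) denotes the affine point (X/Z^2, Y/Z^3), so (X, Y, Z) and (l^2*X, l^3*Y, l*Z) are the same group element for every nonzero l; this helper deliberately distinguishes such representations, whereas gej_eq_var compares only the group element.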
*/ -static int gej_xyz_equals_gej(const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_gej *b) { - rustsecp256k1_v0_9_2_gej a2; - rustsecp256k1_v0_9_2_gej b2; +static int gej_xyz_equals_gej(const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_gej *b) { + rustsecp256k1_v0_10_0_gej a2; + rustsecp256k1_v0_10_0_gej b2; int ret = 1; ret &= a->infinity == b->infinity; if (ret && !a->infinity) { a2 = *a; b2 = *b; - rustsecp256k1_v0_9_2_fe_normalize(&a2.x); - rustsecp256k1_v0_9_2_fe_normalize(&a2.y); - rustsecp256k1_v0_9_2_fe_normalize(&a2.z); - rustsecp256k1_v0_9_2_fe_normalize(&b2.x); - rustsecp256k1_v0_9_2_fe_normalize(&b2.y); - rustsecp256k1_v0_9_2_fe_normalize(&b2.z); - ret &= rustsecp256k1_v0_9_2_fe_cmp_var(&a2.x, &b2.x) == 0; - ret &= rustsecp256k1_v0_9_2_fe_cmp_var(&a2.y, &b2.y) == 0; - ret &= rustsecp256k1_v0_9_2_fe_cmp_var(&a2.z, &b2.z) == 0; + rustsecp256k1_v0_10_0_fe_normalize(&a2.x); + rustsecp256k1_v0_10_0_fe_normalize(&a2.y); + rustsecp256k1_v0_10_0_fe_normalize(&a2.z); + rustsecp256k1_v0_10_0_fe_normalize(&b2.x); + rustsecp256k1_v0_10_0_fe_normalize(&b2.y); + rustsecp256k1_v0_10_0_fe_normalize(&b2.z); + ret &= rustsecp256k1_v0_10_0_fe_cmp_var(&a2.x, &b2.x) == 0; + ret &= rustsecp256k1_v0_10_0_fe_cmp_var(&a2.y, &b2.y) == 0; + ret &= rustsecp256k1_v0_10_0_fe_cmp_var(&a2.z, &b2.z) == 0; } return ret; } -static void ge_equals_gej(const rustsecp256k1_v0_9_2_ge *a, const rustsecp256k1_v0_9_2_gej *b) { - rustsecp256k1_v0_9_2_fe z2s; - rustsecp256k1_v0_9_2_fe u1, u2, s1, s2; - CHECK(a->infinity == b->infinity); - if (a->infinity) { - return; - } - /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */ - rustsecp256k1_v0_9_2_fe_sqr(&z2s, &b->z); - rustsecp256k1_v0_9_2_fe_mul(&u1, &a->x, &z2s); - u2 = b->x; - rustsecp256k1_v0_9_2_fe_mul(&s1, &a->y, &z2s); rustsecp256k1_v0_9_2_fe_mul(&s1, &s1, &b->z); - s2 = b->y; - CHECK(rustsecp256k1_v0_9_2_fe_equal(&u1, &u2)); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&s1, &s2)); -} - static void test_ge(void) { int i, i1; int runs = 6; @@ -3755,31 +3697,32 @@ static void test_ge(void) { * negation, and then those two again but with randomized Z coordinate. * - The same is then done for lambda*p1 and lambda^2*p1. 
*/ - rustsecp256k1_v0_9_2_ge *ge = (rustsecp256k1_v0_9_2_ge *)checked_malloc(&CTX->error_callback, sizeof(rustsecp256k1_v0_9_2_ge) * (1 + 4 * runs)); - rustsecp256k1_v0_9_2_gej *gej = (rustsecp256k1_v0_9_2_gej *)checked_malloc(&CTX->error_callback, sizeof(rustsecp256k1_v0_9_2_gej) * (1 + 4 * runs)); - rustsecp256k1_v0_9_2_fe zf, r; - rustsecp256k1_v0_9_2_fe zfi2, zfi3; - - rustsecp256k1_v0_9_2_gej_set_infinity(&gej[0]); - rustsecp256k1_v0_9_2_ge_clear(&ge[0]); - rustsecp256k1_v0_9_2_ge_set_gej_var(&ge[0], &gej[0]); + rustsecp256k1_v0_10_0_ge *ge = (rustsecp256k1_v0_10_0_ge *)checked_malloc(&CTX->error_callback, sizeof(rustsecp256k1_v0_10_0_ge) * (1 + 4 * runs)); + rustsecp256k1_v0_10_0_gej *gej = (rustsecp256k1_v0_10_0_gej *)checked_malloc(&CTX->error_callback, sizeof(rustsecp256k1_v0_10_0_gej) * (1 + 4 * runs)); + rustsecp256k1_v0_10_0_fe zf, r; + rustsecp256k1_v0_10_0_fe zfi2, zfi3; + + rustsecp256k1_v0_10_0_gej_set_infinity(&gej[0]); + rustsecp256k1_v0_10_0_ge_clear(&ge[0]); + rustsecp256k1_v0_10_0_ge_set_gej_var(&ge[0], &gej[0]); for (i = 0; i < runs; i++) { - int j; - rustsecp256k1_v0_9_2_ge g; + int j, k; + rustsecp256k1_v0_10_0_ge g; random_group_element_test(&g); if (i >= runs - 2) { - rustsecp256k1_v0_9_2_ge_mul_lambda(&g, &ge[1]); + rustsecp256k1_v0_10_0_ge_mul_lambda(&g, &ge[1]); + CHECK(!rustsecp256k1_v0_10_0_ge_eq_var(&g, &ge[1])); } if (i >= runs - 1) { - rustsecp256k1_v0_9_2_ge_mul_lambda(&g, &g); + rustsecp256k1_v0_10_0_ge_mul_lambda(&g, &g); } ge[1 + 4 * i] = g; ge[2 + 4 * i] = g; - rustsecp256k1_v0_9_2_ge_neg(&ge[3 + 4 * i], &g); - rustsecp256k1_v0_9_2_ge_neg(&ge[4 + 4 * i], &g); - rustsecp256k1_v0_9_2_gej_set_ge(&gej[1 + 4 * i], &ge[1 + 4 * i]); + rustsecp256k1_v0_10_0_ge_neg(&ge[3 + 4 * i], &g); + rustsecp256k1_v0_10_0_ge_neg(&ge[4 + 4 * i], &g); + rustsecp256k1_v0_10_0_gej_set_ge(&gej[1 + 4 * i], &ge[1 + 4 * i]); random_group_element_jacobian_test(&gej[2 + 4 * i], &ge[2 + 4 * i]); - rustsecp256k1_v0_9_2_gej_set_ge(&gej[3 + 4 * i], &ge[3 + 4 * i]); + rustsecp256k1_v0_10_0_gej_set_ge(&gej[3 + 4 * i], &ge[3 + 4 * i]); random_group_element_jacobian_test(&gej[4 + 4 * i], &ge[4 + 4 * i]); for (j = 0; j < 4; j++) { random_ge_x_magnitude(&ge[1 + j + 4 * i]); @@ -3788,14 +3731,24 @@ static void test_ge(void) { random_gej_y_magnitude(&gej[1 + j + 4 * i]); random_gej_z_magnitude(&gej[1 + j + 4 * i]); } + + for (j = 0; j < 4; ++j) { + for (k = 0; k < 4; ++k) { + int expect_equal = (j >> 1) == (k >> 1); + CHECK(rustsecp256k1_v0_10_0_ge_eq_var(&ge[1 + j + 4 * i], &ge[1 + k + 4 * i]) == expect_equal); + CHECK(rustsecp256k1_v0_10_0_gej_eq_var(&gej[1 + j + 4 * i], &gej[1 + k + 4 * i]) == expect_equal); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&gej[1 + j + 4 * i], &ge[1 + k + 4 * i]) == expect_equal); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&gej[1 + k + 4 * i], &ge[1 + j + 4 * i]) == expect_equal); + } + } } /* Generate random zf, and zfi2 = 1/zf^2, zfi3 = 1/zf^3 */ random_fe_non_zero_test(&zf); random_fe_magnitude(&zf); - rustsecp256k1_v0_9_2_fe_inv_var(&zfi3, &zf); - rustsecp256k1_v0_9_2_fe_sqr(&zfi2, &zfi3); - rustsecp256k1_v0_9_2_fe_mul(&zfi3, &zfi3, &zfi2); + rustsecp256k1_v0_10_0_fe_inv_var(&zfi3, &zf); + rustsecp256k1_v0_10_0_fe_sqr(&zfi2, &zfi3); + rustsecp256k1_v0_10_0_fe_mul(&zfi3, &zfi3, &zfi2); /* Generate random r */ random_fe_non_zero_test(&r); @@ -3804,165 +3757,165 @@ static void test_ge(void) { int i2; for (i2 = 0; i2 < 1 + 4 * runs; i2++) { /* Compute reference result using gej + gej (var). 
*/ - rustsecp256k1_v0_9_2_gej refj, resj; - rustsecp256k1_v0_9_2_ge ref; - rustsecp256k1_v0_9_2_fe zr; - rustsecp256k1_v0_9_2_gej_add_var(&refj, &gej[i1], &gej[i2], rustsecp256k1_v0_9_2_gej_is_infinity(&gej[i1]) ? NULL : &zr); + rustsecp256k1_v0_10_0_gej refj, resj; + rustsecp256k1_v0_10_0_ge ref; + rustsecp256k1_v0_10_0_fe zr; + rustsecp256k1_v0_10_0_gej_add_var(&refj, &gej[i1], &gej[i2], rustsecp256k1_v0_10_0_gej_is_infinity(&gej[i1]) ? NULL : &zr); /* Check Z ratio. */ - if (!rustsecp256k1_v0_9_2_gej_is_infinity(&gej[i1]) && !rustsecp256k1_v0_9_2_gej_is_infinity(&refj)) { - rustsecp256k1_v0_9_2_fe zrz; rustsecp256k1_v0_9_2_fe_mul(&zrz, &zr, &gej[i1].z); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&zrz, &refj.z)); + if (!rustsecp256k1_v0_10_0_gej_is_infinity(&gej[i1]) && !rustsecp256k1_v0_10_0_gej_is_infinity(&refj)) { + rustsecp256k1_v0_10_0_fe zrz; rustsecp256k1_v0_10_0_fe_mul(&zrz, &zr, &gej[i1].z); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&zrz, &refj.z)); } - rustsecp256k1_v0_9_2_ge_set_gej_var(&ref, &refj); + rustsecp256k1_v0_10_0_ge_set_gej_var(&ref, &refj); /* Test gej + ge with Z ratio result (var). */ - rustsecp256k1_v0_9_2_gej_add_ge_var(&resj, &gej[i1], &ge[i2], rustsecp256k1_v0_9_2_gej_is_infinity(&gej[i1]) ? NULL : &zr); - ge_equals_gej(&ref, &resj); - if (!rustsecp256k1_v0_9_2_gej_is_infinity(&gej[i1]) && !rustsecp256k1_v0_9_2_gej_is_infinity(&resj)) { - rustsecp256k1_v0_9_2_fe zrz; rustsecp256k1_v0_9_2_fe_mul(&zrz, &zr, &gej[i1].z); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&zrz, &resj.z)); + rustsecp256k1_v0_10_0_gej_add_ge_var(&resj, &gej[i1], &ge[i2], rustsecp256k1_v0_10_0_gej_is_infinity(&gej[i1]) ? NULL : &zr); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&resj, &ref)); + if (!rustsecp256k1_v0_10_0_gej_is_infinity(&gej[i1]) && !rustsecp256k1_v0_10_0_gej_is_infinity(&resj)) { + rustsecp256k1_v0_10_0_fe zrz; rustsecp256k1_v0_10_0_fe_mul(&zrz, &zr, &gej[i1].z); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&zrz, &resj.z)); } /* Test gej + ge (var, with additional Z factor). */ { - rustsecp256k1_v0_9_2_ge ge2_zfi = ge[i2]; /* the second term with x and y rescaled for z = 1/zf */ - rustsecp256k1_v0_9_2_fe_mul(&ge2_zfi.x, &ge2_zfi.x, &zfi2); - rustsecp256k1_v0_9_2_fe_mul(&ge2_zfi.y, &ge2_zfi.y, &zfi3); + rustsecp256k1_v0_10_0_ge ge2_zfi = ge[i2]; /* the second term with x and y rescaled for z = 1/zf */ + rustsecp256k1_v0_10_0_fe_mul(&ge2_zfi.x, &ge2_zfi.x, &zfi2); + rustsecp256k1_v0_10_0_fe_mul(&ge2_zfi.y, &ge2_zfi.y, &zfi3); random_ge_x_magnitude(&ge2_zfi); random_ge_y_magnitude(&ge2_zfi); - rustsecp256k1_v0_9_2_gej_add_zinv_var(&resj, &gej[i1], &ge2_zfi, &zf); - ge_equals_gej(&ref, &resj); + rustsecp256k1_v0_10_0_gej_add_zinv_var(&resj, &gej[i1], &ge2_zfi, &zf); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&resj, &ref)); } /* Test gej + ge (const). */ if (i2 != 0) { - /* rustsecp256k1_v0_9_2_gej_add_ge does not support its second argument being infinity. */ - rustsecp256k1_v0_9_2_gej_add_ge(&resj, &gej[i1], &ge[i2]); - ge_equals_gej(&ref, &resj); + /* rustsecp256k1_v0_10_0_gej_add_ge does not support its second argument being infinity. */ + rustsecp256k1_v0_10_0_gej_add_ge(&resj, &gej[i1], &ge[i2]); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&resj, &ref)); } /* Test doubling (var). */ if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 == ((i2 + 3)%4)/2)) { - rustsecp256k1_v0_9_2_fe zr2; + rustsecp256k1_v0_10_0_fe zr2; /* Normal doubling with Z ratio result. 
*/ - rustsecp256k1_v0_9_2_gej_double_var(&resj, &gej[i1], &zr2); - ge_equals_gej(&ref, &resj); + rustsecp256k1_v0_10_0_gej_double_var(&resj, &gej[i1], &zr2); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&resj, &ref)); /* Check Z ratio. */ - rustsecp256k1_v0_9_2_fe_mul(&zr2, &zr2, &gej[i1].z); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&zr2, &resj.z)); + rustsecp256k1_v0_10_0_fe_mul(&zr2, &zr2, &gej[i1].z); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&zr2, &resj.z)); /* Normal doubling. */ - rustsecp256k1_v0_9_2_gej_double_var(&resj, &gej[i2], NULL); - ge_equals_gej(&ref, &resj); + rustsecp256k1_v0_10_0_gej_double_var(&resj, &gej[i2], NULL); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&resj, &ref)); /* Constant-time doubling. */ - rustsecp256k1_v0_9_2_gej_double(&resj, &gej[i2]); - ge_equals_gej(&ref, &resj); + rustsecp256k1_v0_10_0_gej_double(&resj, &gej[i2]); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&resj, &ref)); } /* Test adding opposites. */ if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 != ((i2 + 3)%4)/2)) { - CHECK(rustsecp256k1_v0_9_2_ge_is_infinity(&ref)); + CHECK(rustsecp256k1_v0_10_0_ge_is_infinity(&ref)); } /* Test adding infinity. */ if (i1 == 0) { - CHECK(rustsecp256k1_v0_9_2_ge_is_infinity(&ge[i1])); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&gej[i1])); - ge_equals_gej(&ref, &gej[i2]); + CHECK(rustsecp256k1_v0_10_0_ge_is_infinity(&ge[i1])); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&gej[i1])); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&gej[i2], &ref)); } if (i2 == 0) { - CHECK(rustsecp256k1_v0_9_2_ge_is_infinity(&ge[i2])); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&gej[i2])); - ge_equals_gej(&ref, &gej[i1]); + CHECK(rustsecp256k1_v0_10_0_ge_is_infinity(&ge[i2])); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&gej[i2])); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&gej[i1], &ref)); } } } /* Test adding all points together in random order equals infinity. */ { - rustsecp256k1_v0_9_2_gej sum = SECP256K1_GEJ_CONST_INFINITY; - rustsecp256k1_v0_9_2_gej *gej_shuffled = (rustsecp256k1_v0_9_2_gej *)checked_malloc(&CTX->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_9_2_gej)); + rustsecp256k1_v0_10_0_gej sum = SECP256K1_GEJ_CONST_INFINITY; + rustsecp256k1_v0_10_0_gej *gej_shuffled = (rustsecp256k1_v0_10_0_gej *)checked_malloc(&CTX->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_10_0_gej)); for (i = 0; i < 4 * runs + 1; i++) { gej_shuffled[i] = gej[i]; } for (i = 0; i < 4 * runs + 1; i++) { - int swap = i + rustsecp256k1_v0_9_2_testrand_int(4 * runs + 1 - i); + int swap = i + rustsecp256k1_v0_10_0_testrand_int(4 * runs + 1 - i); if (swap != i) { - rustsecp256k1_v0_9_2_gej t = gej_shuffled[i]; + rustsecp256k1_v0_10_0_gej t = gej_shuffled[i]; gej_shuffled[i] = gej_shuffled[swap]; gej_shuffled[swap] = t; } } for (i = 0; i < 4 * runs + 1; i++) { - rustsecp256k1_v0_9_2_gej_add_var(&sum, &sum, &gej_shuffled[i], NULL); + rustsecp256k1_v0_10_0_gej_add_var(&sum, &sum, &gej_shuffled[i], NULL); } - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&sum)); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&sum)); free(gej_shuffled); } /* Test batch gej -> ge conversion without known z ratios. 
*/ { - rustsecp256k1_v0_9_2_ge *ge_set_all = (rustsecp256k1_v0_9_2_ge *)checked_malloc(&CTX->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_9_2_ge)); - rustsecp256k1_v0_9_2_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1); + rustsecp256k1_v0_10_0_ge *ge_set_all = (rustsecp256k1_v0_10_0_ge *)checked_malloc(&CTX->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_10_0_ge)); + rustsecp256k1_v0_10_0_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1); for (i = 0; i < 4 * runs + 1; i++) { - rustsecp256k1_v0_9_2_fe s; + rustsecp256k1_v0_10_0_fe s; random_fe_non_zero(&s); - rustsecp256k1_v0_9_2_gej_rescale(&gej[i], &s); - ge_equals_gej(&ge_set_all[i], &gej[i]); + rustsecp256k1_v0_10_0_gej_rescale(&gej[i], &s); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&gej[i], &ge_set_all[i])); } free(ge_set_all); } /* Test that all elements have X coordinates on the curve. */ for (i = 1; i < 4 * runs + 1; i++) { - rustsecp256k1_v0_9_2_fe n; - CHECK(rustsecp256k1_v0_9_2_ge_x_on_curve_var(&ge[i].x)); + rustsecp256k1_v0_10_0_fe n; + CHECK(rustsecp256k1_v0_10_0_ge_x_on_curve_var(&ge[i].x)); /* And the same holds after random rescaling. */ - rustsecp256k1_v0_9_2_fe_mul(&n, &zf, &ge[i].x); - CHECK(rustsecp256k1_v0_9_2_ge_x_frac_on_curve_var(&n, &zf)); + rustsecp256k1_v0_10_0_fe_mul(&n, &zf, &ge[i].x); + CHECK(rustsecp256k1_v0_10_0_ge_x_frac_on_curve_var(&n, &zf)); } - /* Test correspondence of rustsecp256k1_v0_9_2_ge_x{,_frac}_on_curve_var with ge_set_xo. */ + /* Test correspondence of rustsecp256k1_v0_10_0_ge_x{,_frac}_on_curve_var with ge_set_xo. */ { - rustsecp256k1_v0_9_2_fe n; - rustsecp256k1_v0_9_2_ge q; + rustsecp256k1_v0_10_0_fe n; + rustsecp256k1_v0_10_0_ge q; int ret_on_curve, ret_frac_on_curve, ret_set_xo; - rustsecp256k1_v0_9_2_fe_mul(&n, &zf, &r); - ret_on_curve = rustsecp256k1_v0_9_2_ge_x_on_curve_var(&r); - ret_frac_on_curve = rustsecp256k1_v0_9_2_ge_x_frac_on_curve_var(&n, &zf); - ret_set_xo = rustsecp256k1_v0_9_2_ge_set_xo_var(&q, &r, 0); + rustsecp256k1_v0_10_0_fe_mul(&n, &zf, &r); + ret_on_curve = rustsecp256k1_v0_10_0_ge_x_on_curve_var(&r); + ret_frac_on_curve = rustsecp256k1_v0_10_0_ge_x_frac_on_curve_var(&n, &zf); + ret_set_xo = rustsecp256k1_v0_10_0_ge_set_xo_var(&q, &r, 0); CHECK(ret_on_curve == ret_frac_on_curve); CHECK(ret_on_curve == ret_set_xo); - if (ret_set_xo) CHECK(rustsecp256k1_v0_9_2_fe_equal(&r, &q.x)); + if (ret_set_xo) CHECK(rustsecp256k1_v0_10_0_fe_equal(&r, &q.x)); } /* Test batch gej -> ge conversion with many infinities. */ for (i = 0; i < 4 * runs + 1; i++) { int odd; random_group_element_test(&ge[i]); - odd = rustsecp256k1_v0_9_2_fe_is_odd(&ge[i].x); + odd = rustsecp256k1_v0_10_0_fe_is_odd(&ge[i].x); CHECK(odd == 0 || odd == 1); /* randomly set half the points to infinity */ if (odd == i % 2) { - rustsecp256k1_v0_9_2_ge_set_infinity(&ge[i]); + rustsecp256k1_v0_10_0_ge_set_infinity(&ge[i]); } - rustsecp256k1_v0_9_2_gej_set_ge(&gej[i], &ge[i]); + rustsecp256k1_v0_10_0_gej_set_ge(&gej[i], &ge[i]); } /* batch convert */ - rustsecp256k1_v0_9_2_ge_set_all_gej_var(ge, gej, 4 * runs + 1); + rustsecp256k1_v0_10_0_ge_set_all_gej_var(ge, gej, 4 * runs + 1); /* check result */ for (i = 0; i < 4 * runs + 1; i++) { - ge_equals_gej(&ge[i], &gej[i]); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&gej[i], &ge[i])); } /* Test batch gej -> ge conversion with all infinities. 
*/ for (i = 0; i < 4 * runs + 1; i++) { - rustsecp256k1_v0_9_2_gej_set_infinity(&gej[i]); + rustsecp256k1_v0_10_0_gej_set_infinity(&gej[i]); } /* batch convert */ - rustsecp256k1_v0_9_2_ge_set_all_gej_var(ge, gej, 4 * runs + 1); + rustsecp256k1_v0_10_0_ge_set_all_gej_var(ge, gej, 4 * runs + 1); /* check result */ for (i = 0; i < 4 * runs + 1; i++) { - CHECK(rustsecp256k1_v0_9_2_ge_is_infinity(&ge[i])); + CHECK(rustsecp256k1_v0_10_0_ge_is_infinity(&ge[i])); } free(ge); @@ -3970,33 +3923,33 @@ static void test_ge(void) { } static void test_intialized_inf(void) { - rustsecp256k1_v0_9_2_ge p; - rustsecp256k1_v0_9_2_gej pj, npj, infj1, infj2, infj3; - rustsecp256k1_v0_9_2_fe zinv; + rustsecp256k1_v0_10_0_ge p; + rustsecp256k1_v0_10_0_gej pj, npj, infj1, infj2, infj3; + rustsecp256k1_v0_10_0_fe zinv; /* Test that adding P+(-P) results in a fully initialized infinity*/ random_group_element_test(&p); - rustsecp256k1_v0_9_2_gej_set_ge(&pj, &p); - rustsecp256k1_v0_9_2_gej_neg(&npj, &pj); + rustsecp256k1_v0_10_0_gej_set_ge(&pj, &p); + rustsecp256k1_v0_10_0_gej_neg(&npj, &pj); - rustsecp256k1_v0_9_2_gej_add_var(&infj1, &pj, &npj, NULL); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&infj1)); - CHECK(rustsecp256k1_v0_9_2_fe_is_zero(&infj1.x)); - CHECK(rustsecp256k1_v0_9_2_fe_is_zero(&infj1.y)); - CHECK(rustsecp256k1_v0_9_2_fe_is_zero(&infj1.z)); + rustsecp256k1_v0_10_0_gej_add_var(&infj1, &pj, &npj, NULL); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&infj1)); + CHECK(rustsecp256k1_v0_10_0_fe_is_zero(&infj1.x)); + CHECK(rustsecp256k1_v0_10_0_fe_is_zero(&infj1.y)); + CHECK(rustsecp256k1_v0_10_0_fe_is_zero(&infj1.z)); - rustsecp256k1_v0_9_2_gej_add_ge_var(&infj2, &npj, &p, NULL); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&infj2)); - CHECK(rustsecp256k1_v0_9_2_fe_is_zero(&infj2.x)); - CHECK(rustsecp256k1_v0_9_2_fe_is_zero(&infj2.y)); - CHECK(rustsecp256k1_v0_9_2_fe_is_zero(&infj2.z)); + rustsecp256k1_v0_10_0_gej_add_ge_var(&infj2, &npj, &p, NULL); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&infj2)); + CHECK(rustsecp256k1_v0_10_0_fe_is_zero(&infj2.x)); + CHECK(rustsecp256k1_v0_10_0_fe_is_zero(&infj2.y)); + CHECK(rustsecp256k1_v0_10_0_fe_is_zero(&infj2.z)); - rustsecp256k1_v0_9_2_fe_set_int(&zinv, 1); - rustsecp256k1_v0_9_2_gej_add_zinv_var(&infj3, &npj, &p, &zinv); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&infj3)); - CHECK(rustsecp256k1_v0_9_2_fe_is_zero(&infj3.x)); - CHECK(rustsecp256k1_v0_9_2_fe_is_zero(&infj3.y)); - CHECK(rustsecp256k1_v0_9_2_fe_is_zero(&infj3.z)); + rustsecp256k1_v0_10_0_fe_set_int(&zinv, 1); + rustsecp256k1_v0_10_0_gej_add_zinv_var(&infj3, &npj, &p, &zinv); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&infj3)); + CHECK(rustsecp256k1_v0_10_0_fe_is_zero(&infj3.x)); + CHECK(rustsecp256k1_v0_10_0_fe_is_zero(&infj3.y)); + CHECK(rustsecp256k1_v0_10_0_fe_is_zero(&infj3.z)); } @@ -4016,7 +3969,7 @@ static void test_add_neg_y_diff_x(void) { * * These points were generated in sage as * - * load("rustsecp256k1_v0_9_2_params.sage") + * load("rustsecp256k1_v0_10_0_params.sage") * * # random "bad pair" * P = C.random_element() @@ -4025,40 +3978,40 @@ static void test_add_neg_y_diff_x(void) { * print(" Q: %x %x" % Q.xy()) * print("P + Q: %x %x" % (P + Q).xy()) */ - rustsecp256k1_v0_9_2_gej aj = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_10_0_gej aj = SECP256K1_GEJ_CONST( 0x8d24cd95, 0x0a355af1, 0x3c543505, 0x44238d30, 0x0643d79f, 0x05a59614, 0x2f8ec030, 0xd58977cb, 0x001e337a, 0x38093dcd, 0x6c0f386d, 0x0b1293a8, 0x4d72c879, 0xd7681924, 0x44e6d2f3, 0x9190117d ); - rustsecp256k1_v0_9_2_gej bj 
= SECP256K1_GEJ_CONST( + rustsecp256k1_v0_10_0_gej bj = SECP256K1_GEJ_CONST( 0xc7b74206, 0x1f788cd9, 0xabd0937d, 0x164a0d86, 0x95f6ff75, 0xf19a4ce9, 0xd013bd7b, 0xbf92d2a7, 0xffe1cc85, 0xc7f6c232, 0x93f0c792, 0xf4ed6c57, 0xb28d3786, 0x2897e6db, 0xbb192d0b, 0x6e6feab2 ); - rustsecp256k1_v0_9_2_gej sumj = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_10_0_gej sumj = SECP256K1_GEJ_CONST( 0x671a63c0, 0x3efdad4c, 0x389a7798, 0x24356027, 0xb3d69010, 0x278625c3, 0x5c86d390, 0x184a8f7a, 0x5f6409c2, 0x2ce01f2b, 0x511fd375, 0x25071d08, 0xda651801, 0x70e95caf, 0x8f0d893c, 0xbed8fbbe ); - rustsecp256k1_v0_9_2_ge b; - rustsecp256k1_v0_9_2_gej resj; - rustsecp256k1_v0_9_2_ge res; - rustsecp256k1_v0_9_2_ge_set_gej(&b, &bj); + rustsecp256k1_v0_10_0_ge b; + rustsecp256k1_v0_10_0_gej resj; + rustsecp256k1_v0_10_0_ge res; + rustsecp256k1_v0_10_0_ge_set_gej(&b, &bj); - rustsecp256k1_v0_9_2_gej_add_var(&resj, &aj, &bj, NULL); - rustsecp256k1_v0_9_2_ge_set_gej(&res, &resj); - ge_equals_gej(&res, &sumj); + rustsecp256k1_v0_10_0_gej_add_var(&resj, &aj, &bj, NULL); + rustsecp256k1_v0_10_0_ge_set_gej(&res, &resj); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&sumj, &res)); - rustsecp256k1_v0_9_2_gej_add_ge(&resj, &aj, &b); - rustsecp256k1_v0_9_2_ge_set_gej(&res, &resj); - ge_equals_gej(&res, &sumj); + rustsecp256k1_v0_10_0_gej_add_ge(&resj, &aj, &b); + rustsecp256k1_v0_10_0_ge_set_gej(&res, &resj); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&sumj, &res)); - rustsecp256k1_v0_9_2_gej_add_ge_var(&resj, &aj, &b, NULL); - rustsecp256k1_v0_9_2_ge_set_gej(&res, &resj); - ge_equals_gej(&res, &sumj); + rustsecp256k1_v0_10_0_gej_add_ge_var(&resj, &aj, &b, NULL); + rustsecp256k1_v0_10_0_ge_set_gej(&res, &resj); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&sumj, &res)); } static void run_ge(void) { @@ -4070,22 +4023,22 @@ static void run_ge(void) { test_intialized_inf(); } -static void test_gej_cmov(const rustsecp256k1_v0_9_2_gej *a, const rustsecp256k1_v0_9_2_gej *b) { - rustsecp256k1_v0_9_2_gej t = *a; - rustsecp256k1_v0_9_2_gej_cmov(&t, b, 0); +static void test_gej_cmov(const rustsecp256k1_v0_10_0_gej *a, const rustsecp256k1_v0_10_0_gej *b) { + rustsecp256k1_v0_10_0_gej t = *a; + rustsecp256k1_v0_10_0_gej_cmov(&t, b, 0); CHECK(gej_xyz_equals_gej(&t, a)); - rustsecp256k1_v0_9_2_gej_cmov(&t, b, 1); + rustsecp256k1_v0_10_0_gej_cmov(&t, b, 1); CHECK(gej_xyz_equals_gej(&t, b)); } static void run_gej(void) { int i; - rustsecp256k1_v0_9_2_gej a, b; + rustsecp256k1_v0_10_0_gej a, b; - /* Tests for rustsecp256k1_v0_9_2_gej_cmov */ + /* Tests for rustsecp256k1_v0_10_0_gej_cmov */ for (i = 0; i < COUNT; i++) { - rustsecp256k1_v0_9_2_gej_set_infinity(&a); - rustsecp256k1_v0_9_2_gej_set_infinity(&b); + rustsecp256k1_v0_10_0_gej_set_infinity(&a); + rustsecp256k1_v0_10_0_gej_set_infinity(&b); test_gej_cmov(&a, &b); random_gej_test(&a); @@ -4100,42 +4053,42 @@ static void run_gej(void) { test_gej_cmov(&b, &a); } - /* Tests for rustsecp256k1_v0_9_2_gej_eq_var */ + /* Tests for rustsecp256k1_v0_10_0_gej_eq_var */ for (i = 0; i < COUNT; i++) { - rustsecp256k1_v0_9_2_fe fe; + rustsecp256k1_v0_10_0_fe fe; random_gej_test(&a); random_gej_test(&b); - CHECK(!rustsecp256k1_v0_9_2_gej_eq_var(&a, &b)); + CHECK(!rustsecp256k1_v0_10_0_gej_eq_var(&a, &b)); b = a; random_fe_non_zero_test(&fe); - rustsecp256k1_v0_9_2_gej_rescale(&a, &fe); - CHECK(rustsecp256k1_v0_9_2_gej_eq_var(&a, &b)); + rustsecp256k1_v0_10_0_gej_rescale(&a, &fe); + CHECK(rustsecp256k1_v0_10_0_gej_eq_var(&a, &b)); } } static void test_ec_combine(void) { - rustsecp256k1_v0_9_2_scalar sum = 
rustsecp256k1_v0_9_2_scalar_zero; - rustsecp256k1_v0_9_2_pubkey data[6]; - const rustsecp256k1_v0_9_2_pubkey* d[6]; - rustsecp256k1_v0_9_2_pubkey sd; - rustsecp256k1_v0_9_2_pubkey sd2; - rustsecp256k1_v0_9_2_gej Qj; - rustsecp256k1_v0_9_2_ge Q; + rustsecp256k1_v0_10_0_scalar sum = rustsecp256k1_v0_10_0_scalar_zero; + rustsecp256k1_v0_10_0_pubkey data[6]; + const rustsecp256k1_v0_10_0_pubkey* d[6]; + rustsecp256k1_v0_10_0_pubkey sd; + rustsecp256k1_v0_10_0_pubkey sd2; + rustsecp256k1_v0_10_0_gej Qj; + rustsecp256k1_v0_10_0_ge Q; int i; for (i = 1; i <= 6; i++) { - rustsecp256k1_v0_9_2_scalar s; + rustsecp256k1_v0_10_0_scalar s; random_scalar_order_test(&s); - rustsecp256k1_v0_9_2_scalar_add(&sum, &sum, &s); - rustsecp256k1_v0_9_2_ecmult_gen(&CTX->ecmult_gen_ctx, &Qj, &s); - rustsecp256k1_v0_9_2_ge_set_gej(&Q, &Qj); - rustsecp256k1_v0_9_2_pubkey_save(&data[i - 1], &Q); + rustsecp256k1_v0_10_0_scalar_add(&sum, &sum, &s); + rustsecp256k1_v0_10_0_ecmult_gen(&CTX->ecmult_gen_ctx, &Qj, &s); + rustsecp256k1_v0_10_0_ge_set_gej(&Q, &Qj); + rustsecp256k1_v0_10_0_pubkey_save(&data[i - 1], &Q); d[i - 1] = &data[i - 1]; - rustsecp256k1_v0_9_2_ecmult_gen(&CTX->ecmult_gen_ctx, &Qj, &sum); - rustsecp256k1_v0_9_2_ge_set_gej(&Q, &Qj); - rustsecp256k1_v0_9_2_pubkey_save(&sd, &Q); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_combine(CTX, &sd2, d, i) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&sd, &sd2, sizeof(sd)) == 0); + rustsecp256k1_v0_10_0_ecmult_gen(&CTX->ecmult_gen_ctx, &Qj, &sum); + rustsecp256k1_v0_10_0_ge_set_gej(&Q, &Qj); + rustsecp256k1_v0_10_0_pubkey_save(&sd, &Q); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_combine(CTX, &sd2, d, i) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&sd, &sd2, sizeof(sd)) == 0); } } @@ -4146,45 +4099,45 @@ static void run_ec_combine(void) { } } -static void test_group_decompress(const rustsecp256k1_v0_9_2_fe* x) { +static void test_group_decompress(const rustsecp256k1_v0_10_0_fe* x) { /* The input itself, normalized. */ - rustsecp256k1_v0_9_2_fe fex = *x; + rustsecp256k1_v0_10_0_fe fex = *x; /* Results of set_xo_var(..., 0), set_xo_var(..., 1). */ - rustsecp256k1_v0_9_2_ge ge_even, ge_odd; + rustsecp256k1_v0_10_0_ge ge_even, ge_odd; /* Return values of the above calls. */ int res_even, res_odd; - rustsecp256k1_v0_9_2_fe_normalize_var(&fex); + rustsecp256k1_v0_10_0_fe_normalize_var(&fex); - res_even = rustsecp256k1_v0_9_2_ge_set_xo_var(&ge_even, &fex, 0); - res_odd = rustsecp256k1_v0_9_2_ge_set_xo_var(&ge_odd, &fex, 1); + res_even = rustsecp256k1_v0_10_0_ge_set_xo_var(&ge_even, &fex, 0); + res_odd = rustsecp256k1_v0_10_0_ge_set_xo_var(&ge_odd, &fex, 1); CHECK(res_even == res_odd); if (res_even) { - rustsecp256k1_v0_9_2_fe_normalize_var(&ge_odd.x); - rustsecp256k1_v0_9_2_fe_normalize_var(&ge_even.x); - rustsecp256k1_v0_9_2_fe_normalize_var(&ge_odd.y); - rustsecp256k1_v0_9_2_fe_normalize_var(&ge_even.y); + rustsecp256k1_v0_10_0_fe_normalize_var(&ge_odd.x); + rustsecp256k1_v0_10_0_fe_normalize_var(&ge_even.x); + rustsecp256k1_v0_10_0_fe_normalize_var(&ge_odd.y); + rustsecp256k1_v0_10_0_fe_normalize_var(&ge_even.y); /* No infinity allowed. */ CHECK(!ge_even.infinity); CHECK(!ge_odd.infinity); /* Check that the x coordinates check out. */ - CHECK(rustsecp256k1_v0_9_2_fe_equal(&ge_even.x, x)); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&ge_odd.x, x)); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&ge_even.x, x)); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&ge_odd.x, x)); /* Check odd/even Y in ge_odd, ge_even. 
*/ - CHECK(rustsecp256k1_v0_9_2_fe_is_odd(&ge_odd.y)); - CHECK(!rustsecp256k1_v0_9_2_fe_is_odd(&ge_even.y)); + CHECK(rustsecp256k1_v0_10_0_fe_is_odd(&ge_odd.y)); + CHECK(!rustsecp256k1_v0_10_0_fe_is_odd(&ge_even.y)); } } static void run_group_decompress(void) { int i; for (i = 0; i < COUNT * 4; i++) { - rustsecp256k1_v0_9_2_fe fe; + rustsecp256k1_v0_10_0_fe fe; random_fe_test(&fe); test_group_decompress(&fe); } @@ -4192,7 +4145,7 @@ static void run_group_decompress(void) { /***** ECMULT TESTS *****/ -static void test_pre_g_table(const rustsecp256k1_v0_9_2_ge_storage * pre_g, size_t n) { +static void test_pre_g_table(const rustsecp256k1_v0_10_0_ge_storage * pre_g, size_t n) { /* Tests the pre_g / pre_g_128 tables for consistency. * For independent verification we take a "geometric" approach to verification. * We check that every entry is on-curve. @@ -4203,168 +4156,168 @@ static void test_pre_g_table(const rustsecp256k1_v0_9_2_ge_storage * pre_g, size * * Checking the table's generators are correct is done in run_ecmult_pre_g. */ - rustsecp256k1_v0_9_2_gej g2; - rustsecp256k1_v0_9_2_ge p, q, gg; - rustsecp256k1_v0_9_2_fe dpx, dpy, dqx, dqy; + rustsecp256k1_v0_10_0_gej g2; + rustsecp256k1_v0_10_0_ge p, q, gg; + rustsecp256k1_v0_10_0_fe dpx, dpy, dqx, dqy; size_t i; CHECK(0 < n); - rustsecp256k1_v0_9_2_ge_from_storage(&p, &pre_g[0]); - CHECK(rustsecp256k1_v0_9_2_ge_is_valid_var(&p)); + rustsecp256k1_v0_10_0_ge_from_storage(&p, &pre_g[0]); + CHECK(rustsecp256k1_v0_10_0_ge_is_valid_var(&p)); - rustsecp256k1_v0_9_2_gej_set_ge(&g2, &p); - rustsecp256k1_v0_9_2_gej_double_var(&g2, &g2, NULL); - rustsecp256k1_v0_9_2_ge_set_gej_var(&gg, &g2); + rustsecp256k1_v0_10_0_gej_set_ge(&g2, &p); + rustsecp256k1_v0_10_0_gej_double_var(&g2, &g2, NULL); + rustsecp256k1_v0_10_0_ge_set_gej_var(&gg, &g2); for (i = 1; i < n; ++i) { - rustsecp256k1_v0_9_2_fe_negate(&dpx, &p.x, 1); rustsecp256k1_v0_9_2_fe_add(&dpx, &gg.x); rustsecp256k1_v0_9_2_fe_normalize_weak(&dpx); - rustsecp256k1_v0_9_2_fe_negate(&dpy, &p.y, 1); rustsecp256k1_v0_9_2_fe_add(&dpy, &gg.y); rustsecp256k1_v0_9_2_fe_normalize_weak(&dpy); + rustsecp256k1_v0_10_0_fe_negate(&dpx, &p.x, 1); rustsecp256k1_v0_10_0_fe_add(&dpx, &gg.x); rustsecp256k1_v0_10_0_fe_normalize_weak(&dpx); + rustsecp256k1_v0_10_0_fe_negate(&dpy, &p.y, 1); rustsecp256k1_v0_10_0_fe_add(&dpy, &gg.y); rustsecp256k1_v0_10_0_fe_normalize_weak(&dpy); /* Check that p is not equal to gg */ - CHECK(!rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&dpx) || !rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&dpy)); + CHECK(!rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&dpx) || !rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&dpy)); - rustsecp256k1_v0_9_2_ge_from_storage(&q, &pre_g[i]); - CHECK(rustsecp256k1_v0_9_2_ge_is_valid_var(&q)); + rustsecp256k1_v0_10_0_ge_from_storage(&q, &pre_g[i]); + CHECK(rustsecp256k1_v0_10_0_ge_is_valid_var(&q)); - rustsecp256k1_v0_9_2_fe_negate(&dqx, &q.x, 1); rustsecp256k1_v0_9_2_fe_add(&dqx, &gg.x); - dqy = q.y; rustsecp256k1_v0_9_2_fe_add(&dqy, &gg.y); + rustsecp256k1_v0_10_0_fe_negate(&dqx, &q.x, 1); rustsecp256k1_v0_10_0_fe_add(&dqx, &gg.x); + dqy = q.y; rustsecp256k1_v0_10_0_fe_add(&dqy, &gg.y); /* Check that -q is not equal to gg */ - CHECK(!rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&dqx) || !rustsecp256k1_v0_9_2_fe_normalizes_to_zero_var(&dqy)); + CHECK(!rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&dqx) || !rustsecp256k1_v0_10_0_fe_normalizes_to_zero_var(&dqy)); /* Check that -q is not equal to p */ - CHECK(!rustsecp256k1_v0_9_2_fe_equal(&dpx, &dqx) 
|| !rustsecp256k1_v0_9_2_fe_equal(&dpy, &dqy)); + CHECK(!rustsecp256k1_v0_10_0_fe_equal(&dpx, &dqx) || !rustsecp256k1_v0_10_0_fe_equal(&dpy, &dqy)); /* Check that p, -q and gg are colinear */ - rustsecp256k1_v0_9_2_fe_mul(&dpx, &dpx, &dqy); - rustsecp256k1_v0_9_2_fe_mul(&dpy, &dpy, &dqx); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&dpx, &dpy)); + rustsecp256k1_v0_10_0_fe_mul(&dpx, &dpx, &dqy); + rustsecp256k1_v0_10_0_fe_mul(&dpy, &dpy, &dqx); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&dpx, &dpy)); p = q; } } static void run_ecmult_pre_g(void) { - rustsecp256k1_v0_9_2_ge_storage gs; - rustsecp256k1_v0_9_2_gej gj; - rustsecp256k1_v0_9_2_ge g; + rustsecp256k1_v0_10_0_ge_storage gs; + rustsecp256k1_v0_10_0_gej gj; + rustsecp256k1_v0_10_0_ge g; size_t i; /* Check that the pre_g and pre_g_128 tables are consistent. */ - test_pre_g_table(rustsecp256k1_v0_9_2_pre_g, ECMULT_TABLE_SIZE(WINDOW_G)); - test_pre_g_table(rustsecp256k1_v0_9_2_pre_g_128, ECMULT_TABLE_SIZE(WINDOW_G)); + test_pre_g_table(rustsecp256k1_v0_10_0_pre_g, ECMULT_TABLE_SIZE(WINDOW_G)); + test_pre_g_table(rustsecp256k1_v0_10_0_pre_g_128, ECMULT_TABLE_SIZE(WINDOW_G)); /* Check the first entry from the pre_g table. */ - rustsecp256k1_v0_9_2_ge_to_storage(&gs, &rustsecp256k1_v0_9_2_ge_const_g); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&gs, &rustsecp256k1_v0_9_2_pre_g[0], sizeof(gs)) == 0); + rustsecp256k1_v0_10_0_ge_to_storage(&gs, &rustsecp256k1_v0_10_0_ge_const_g); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&gs, &rustsecp256k1_v0_10_0_pre_g[0], sizeof(gs)) == 0); /* Check the first entry from the pre_g_128 table. */ - rustsecp256k1_v0_9_2_gej_set_ge(&gj, &rustsecp256k1_v0_9_2_ge_const_g); + rustsecp256k1_v0_10_0_gej_set_ge(&gj, &rustsecp256k1_v0_10_0_ge_const_g); for (i = 0; i < 128; ++i) { - rustsecp256k1_v0_9_2_gej_double_var(&gj, &gj, NULL); + rustsecp256k1_v0_10_0_gej_double_var(&gj, &gj, NULL); } - rustsecp256k1_v0_9_2_ge_set_gej(&g, &gj); - rustsecp256k1_v0_9_2_ge_to_storage(&gs, &g); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&gs, &rustsecp256k1_v0_9_2_pre_g_128[0], sizeof(gs)) == 0); + rustsecp256k1_v0_10_0_ge_set_gej(&g, &gj); + rustsecp256k1_v0_10_0_ge_to_storage(&gs, &g); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&gs, &rustsecp256k1_v0_10_0_pre_g_128[0], sizeof(gs)) == 0); } static void run_ecmult_chain(void) { /* random starting point A (on the curve) */ - rustsecp256k1_v0_9_2_gej a = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_10_0_gej a = SECP256K1_GEJ_CONST( 0x8b30bbe9, 0xae2a9906, 0x96b22f67, 0x0709dff3, 0x727fd8bc, 0x04d3362c, 0x6c7bf458, 0xe2846004, 0xa357ae91, 0x5c4a6528, 0x1309edf2, 0x0504740f, 0x0eb33439, 0x90216b4f, 0x81063cb6, 0x5f2f7e0f ); /* two random initial factors xn and gn */ - rustsecp256k1_v0_9_2_scalar xn = SECP256K1_SCALAR_CONST( + rustsecp256k1_v0_10_0_scalar xn = SECP256K1_SCALAR_CONST( 0x84cc5452, 0xf7fde1ed, 0xb4d38a8c, 0xe9b1b84c, 0xcef31f14, 0x6e569be9, 0x705d357a, 0x42985407 ); - rustsecp256k1_v0_9_2_scalar gn = SECP256K1_SCALAR_CONST( + rustsecp256k1_v0_10_0_scalar gn = SECP256K1_SCALAR_CONST( 0xa1e58d22, 0x553dcd42, 0xb2398062, 0x5d4c57a9, 0x6e9323d4, 0x2b3152e5, 0xca2c3990, 0xedc7c9de ); /* two small multipliers to be applied to xn and gn in every iteration: */ - static const rustsecp256k1_v0_9_2_scalar xf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x1337); - static const rustsecp256k1_v0_9_2_scalar gf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x7113); + static const rustsecp256k1_v0_10_0_scalar xf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x1337); + static const rustsecp256k1_v0_10_0_scalar gf = 
SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x7113); /* accumulators with the resulting coefficients to A and G */ - rustsecp256k1_v0_9_2_scalar ae = rustsecp256k1_v0_9_2_scalar_one; - rustsecp256k1_v0_9_2_scalar ge = rustsecp256k1_v0_9_2_scalar_zero; + rustsecp256k1_v0_10_0_scalar ae = rustsecp256k1_v0_10_0_scalar_one; + rustsecp256k1_v0_10_0_scalar ge = rustsecp256k1_v0_10_0_scalar_zero; /* actual points */ - rustsecp256k1_v0_9_2_gej x; - rustsecp256k1_v0_9_2_gej x2; + rustsecp256k1_v0_10_0_gej x; + rustsecp256k1_v0_10_0_gej x2; int i; /* the point being computed */ x = a; for (i = 0; i < 200*COUNT; i++) { /* in each iteration, compute X = xn*X + gn*G; */ - rustsecp256k1_v0_9_2_ecmult(&x, &x, &xn, &gn); + rustsecp256k1_v0_10_0_ecmult(&x, &x, &xn, &gn); /* also compute ae and ge: the actual accumulated factors for A and G */ /* if X was (ae*A+ge*G), xn*X + gn*G results in (xn*ae*A + (xn*ge+gn)*G) */ - rustsecp256k1_v0_9_2_scalar_mul(&ae, &ae, &xn); - rustsecp256k1_v0_9_2_scalar_mul(&ge, &ge, &xn); - rustsecp256k1_v0_9_2_scalar_add(&ge, &ge, &gn); + rustsecp256k1_v0_10_0_scalar_mul(&ae, &ae, &xn); + rustsecp256k1_v0_10_0_scalar_mul(&ge, &ge, &xn); + rustsecp256k1_v0_10_0_scalar_add(&ge, &ge, &gn); /* modify xn and gn */ - rustsecp256k1_v0_9_2_scalar_mul(&xn, &xn, &xf); - rustsecp256k1_v0_9_2_scalar_mul(&gn, &gn, &gf); + rustsecp256k1_v0_10_0_scalar_mul(&xn, &xn, &xf); + rustsecp256k1_v0_10_0_scalar_mul(&gn, &gn, &gf); /* verify */ if (i == 19999) { /* expected result after 19999 iterations */ - rustsecp256k1_v0_9_2_gej rp = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_10_0_gej rp = SECP256K1_GEJ_CONST( 0xD6E96687, 0xF9B10D09, 0x2A6F3543, 0x9D86CEBE, 0xA4535D0D, 0x409F5358, 0x6440BD74, 0xB933E830, 0xB95CBCA2, 0xC77DA786, 0x539BE8FD, 0x53354D2D, 0x3B4F566A, 0xE6580454, 0x07ED6015, 0xEE1B2A88 ); - CHECK(rustsecp256k1_v0_9_2_gej_eq_var(&rp, &x)); + CHECK(rustsecp256k1_v0_10_0_gej_eq_var(&rp, &x)); } } /* redo the computation, but directly with the resulting ae and ge coefficients: */ - rustsecp256k1_v0_9_2_ecmult(&x2, &a, &ae, &ge); - CHECK(rustsecp256k1_v0_9_2_gej_eq_var(&x, &x2)); + rustsecp256k1_v0_10_0_ecmult(&x2, &a, &ae, &ge); + CHECK(rustsecp256k1_v0_10_0_gej_eq_var(&x, &x2)); } -static void test_point_times_order(const rustsecp256k1_v0_9_2_gej *point) { +static void test_point_times_order(const rustsecp256k1_v0_10_0_gej *point) { /* X * (point + G) + (order-X) * (point + G) = 0 */ - rustsecp256k1_v0_9_2_scalar x; - rustsecp256k1_v0_9_2_scalar nx; - rustsecp256k1_v0_9_2_gej res1, res2; - rustsecp256k1_v0_9_2_ge res3; + rustsecp256k1_v0_10_0_scalar x; + rustsecp256k1_v0_10_0_scalar nx; + rustsecp256k1_v0_10_0_gej res1, res2; + rustsecp256k1_v0_10_0_ge res3; unsigned char pub[65]; size_t psize = 65; random_scalar_order_test(&x); - rustsecp256k1_v0_9_2_scalar_negate(&nx, &x); - rustsecp256k1_v0_9_2_ecmult(&res1, point, &x, &x); /* calc res1 = x * point + x * G; */ - rustsecp256k1_v0_9_2_ecmult(&res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */ - rustsecp256k1_v0_9_2_gej_add_var(&res1, &res1, &res2, NULL); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&res1)); - rustsecp256k1_v0_9_2_ge_set_gej(&res3, &res1); - CHECK(rustsecp256k1_v0_9_2_ge_is_infinity(&res3)); - CHECK(rustsecp256k1_v0_9_2_ge_is_valid_var(&res3) == 0); - CHECK(rustsecp256k1_v0_9_2_eckey_pubkey_serialize(&res3, pub, &psize, 0) == 0); + rustsecp256k1_v0_10_0_scalar_negate(&nx, &x); + rustsecp256k1_v0_10_0_ecmult(&res1, point, &x, &x); /* calc res1 = x * point + x * G; */ +
rustsecp256k1_v0_10_0_ecmult(&res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */ + rustsecp256k1_v0_10_0_gej_add_var(&res1, &res1, &res2, NULL); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&res1)); + rustsecp256k1_v0_10_0_ge_set_gej(&res3, &res1); + CHECK(rustsecp256k1_v0_10_0_ge_is_infinity(&res3)); + CHECK(rustsecp256k1_v0_10_0_ge_is_valid_var(&res3) == 0); + CHECK(rustsecp256k1_v0_10_0_eckey_pubkey_serialize(&res3, pub, &psize, 0) == 0); psize = 65; - CHECK(rustsecp256k1_v0_9_2_eckey_pubkey_serialize(&res3, pub, &psize, 1) == 0); + CHECK(rustsecp256k1_v0_10_0_eckey_pubkey_serialize(&res3, pub, &psize, 1) == 0); /* check zero/one edge cases */ - rustsecp256k1_v0_9_2_ecmult(&res1, point, &rustsecp256k1_v0_9_2_scalar_zero, &rustsecp256k1_v0_9_2_scalar_zero); - rustsecp256k1_v0_9_2_ge_set_gej(&res3, &res1); - CHECK(rustsecp256k1_v0_9_2_ge_is_infinity(&res3)); - rustsecp256k1_v0_9_2_ecmult(&res1, point, &rustsecp256k1_v0_9_2_scalar_one, &rustsecp256k1_v0_9_2_scalar_zero); - rustsecp256k1_v0_9_2_ge_set_gej(&res3, &res1); - ge_equals_gej(&res3, point); - rustsecp256k1_v0_9_2_ecmult(&res1, point, &rustsecp256k1_v0_9_2_scalar_zero, &rustsecp256k1_v0_9_2_scalar_one); - rustsecp256k1_v0_9_2_ge_set_gej(&res3, &res1); - ge_equals_ge(&res3, &rustsecp256k1_v0_9_2_ge_const_g); -} - -/* These scalars reach large (in absolute value) outputs when fed to rustsecp256k1_v0_9_2_scalar_split_lambda. + rustsecp256k1_v0_10_0_ecmult(&res1, point, &rustsecp256k1_v0_10_0_scalar_zero, &rustsecp256k1_v0_10_0_scalar_zero); + rustsecp256k1_v0_10_0_ge_set_gej(&res3, &res1); + CHECK(rustsecp256k1_v0_10_0_ge_is_infinity(&res3)); + rustsecp256k1_v0_10_0_ecmult(&res1, point, &rustsecp256k1_v0_10_0_scalar_one, &rustsecp256k1_v0_10_0_scalar_zero); + rustsecp256k1_v0_10_0_ge_set_gej(&res3, &res1); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(point, &res3)); + rustsecp256k1_v0_10_0_ecmult(&res1, point, &rustsecp256k1_v0_10_0_scalar_zero, &rustsecp256k1_v0_10_0_scalar_one); + rustsecp256k1_v0_10_0_ge_set_gej(&res3, &res1); + CHECK(rustsecp256k1_v0_10_0_ge_eq_var(&rustsecp256k1_v0_10_0_ge_const_g, &res3)); +} + +/* These scalars reach large (in absolute value) outputs when fed to rustsecp256k1_v0_10_0_scalar_split_lambda. 
* * They are computed as: * - For a in [-2, -1, 0, 1, 2]: * - For b in [-3, -1, 1, 3]: * - Output (a*LAMBDA + (ORDER+b)/2) % ORDER */ -static const rustsecp256k1_v0_9_2_scalar scalars_near_split_bounds[20] = { +static const rustsecp256k1_v0_10_0_scalar scalars_near_split_bounds[20] = { SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fc), SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fd), SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fe), @@ -4387,42 +4340,42 @@ static const rustsecp256k1_v0_9_2_scalar scalars_near_split_bounds[20] = { SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a45) }; -static void test_ecmult_target(const rustsecp256k1_v0_9_2_scalar* target, int mode) { +static void test_ecmult_target(const rustsecp256k1_v0_10_0_scalar* target, int mode) { /* Mode: 0=ecmult_gen, 1=ecmult, 2=ecmult_const */ - rustsecp256k1_v0_9_2_scalar n1, n2; - rustsecp256k1_v0_9_2_ge p; - rustsecp256k1_v0_9_2_gej pj, p1j, p2j, ptj; + rustsecp256k1_v0_10_0_scalar n1, n2; + rustsecp256k1_v0_10_0_ge p; + rustsecp256k1_v0_10_0_gej pj, p1j, p2j, ptj; /* Generate random n1,n2 such that n1+n2 = -target. */ random_scalar_order_test(&n1); - rustsecp256k1_v0_9_2_scalar_add(&n2, &n1, target); - rustsecp256k1_v0_9_2_scalar_negate(&n2, &n2); + rustsecp256k1_v0_10_0_scalar_add(&n2, &n1, target); + rustsecp256k1_v0_10_0_scalar_negate(&n2, &n2); /* Generate a random input point. */ if (mode != 0) { random_group_element_test(&p); - rustsecp256k1_v0_9_2_gej_set_ge(&pj, &p); + rustsecp256k1_v0_10_0_gej_set_ge(&pj, &p); } /* EC multiplications */ if (mode == 0) { - rustsecp256k1_v0_9_2_ecmult_gen(&CTX->ecmult_gen_ctx, &p1j, &n1); - rustsecp256k1_v0_9_2_ecmult_gen(&CTX->ecmult_gen_ctx, &p2j, &n2); - rustsecp256k1_v0_9_2_ecmult_gen(&CTX->ecmult_gen_ctx, &ptj, target); + rustsecp256k1_v0_10_0_ecmult_gen(&CTX->ecmult_gen_ctx, &p1j, &n1); + rustsecp256k1_v0_10_0_ecmult_gen(&CTX->ecmult_gen_ctx, &p2j, &n2); + rustsecp256k1_v0_10_0_ecmult_gen(&CTX->ecmult_gen_ctx, &ptj, target); } else if (mode == 1) { - rustsecp256k1_v0_9_2_ecmult(&p1j, &pj, &n1, &rustsecp256k1_v0_9_2_scalar_zero); - rustsecp256k1_v0_9_2_ecmult(&p2j, &pj, &n2, &rustsecp256k1_v0_9_2_scalar_zero); - rustsecp256k1_v0_9_2_ecmult(&ptj, &pj, target, &rustsecp256k1_v0_9_2_scalar_zero); + rustsecp256k1_v0_10_0_ecmult(&p1j, &pj, &n1, &rustsecp256k1_v0_10_0_scalar_zero); + rustsecp256k1_v0_10_0_ecmult(&p2j, &pj, &n2, &rustsecp256k1_v0_10_0_scalar_zero); + rustsecp256k1_v0_10_0_ecmult(&ptj, &pj, target, &rustsecp256k1_v0_10_0_scalar_zero); } else { - rustsecp256k1_v0_9_2_ecmult_const(&p1j, &p, &n1); - rustsecp256k1_v0_9_2_ecmult_const(&p2j, &p, &n2); - rustsecp256k1_v0_9_2_ecmult_const(&ptj, &p, target); + rustsecp256k1_v0_10_0_ecmult_const(&p1j, &p, &n1); + rustsecp256k1_v0_10_0_ecmult_const(&p2j, &p, &n2); + rustsecp256k1_v0_10_0_ecmult_const(&ptj, &p, target); } /* Add them all up: n1*P + n2*P + target*P = (n1+n2+target)*P = (n1+n2-n1-n2)*P = 0. 
*/ - rustsecp256k1_v0_9_2_gej_add_var(&ptj, &ptj, &p1j, NULL); - rustsecp256k1_v0_9_2_gej_add_var(&ptj, &ptj, &p2j, NULL); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&ptj)); + rustsecp256k1_v0_10_0_gej_add_var(&ptj, &ptj, &p1j, NULL); + rustsecp256k1_v0_10_0_gej_add_var(&ptj, &ptj, &p2j, NULL); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&ptj)); } static void run_ecmult_near_split_bound(void) { @@ -4439,102 +4392,151 @@ static void run_ecmult_near_split_bound(void) { static void run_point_times_order(void) { int i; - rustsecp256k1_v0_9_2_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2); - static const rustsecp256k1_v0_9_2_fe xr = SECP256K1_FE_CONST( + rustsecp256k1_v0_10_0_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2); + static const rustsecp256k1_v0_10_0_fe xr = SECP256K1_FE_CONST( 0x7603CB59, 0xB0EF6C63, 0xFE608479, 0x2A0C378C, 0xDB3233A8, 0x0F8A9A09, 0xA877DEAD, 0x31B38C45 ); for (i = 0; i < 500; i++) { - rustsecp256k1_v0_9_2_ge p; - if (rustsecp256k1_v0_9_2_ge_set_xo_var(&p, &x, 1)) { - rustsecp256k1_v0_9_2_gej j; - CHECK(rustsecp256k1_v0_9_2_ge_is_valid_var(&p)); - rustsecp256k1_v0_9_2_gej_set_ge(&j, &p); + rustsecp256k1_v0_10_0_ge p; + if (rustsecp256k1_v0_10_0_ge_set_xo_var(&p, &x, 1)) { + rustsecp256k1_v0_10_0_gej j; + CHECK(rustsecp256k1_v0_10_0_ge_is_valid_var(&p)); + rustsecp256k1_v0_10_0_gej_set_ge(&j, &p); test_point_times_order(&j); } - rustsecp256k1_v0_9_2_fe_sqr(&x, &x); + rustsecp256k1_v0_10_0_fe_sqr(&x, &x); } - rustsecp256k1_v0_9_2_fe_normalize_var(&x); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&x, &xr)); + rustsecp256k1_v0_10_0_fe_normalize_var(&x); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&x, &xr)); } static void ecmult_const_random_mult(void) { /* random starting point A (on the curve) */ - rustsecp256k1_v0_9_2_ge a = SECP256K1_GE_CONST( + rustsecp256k1_v0_10_0_ge a = SECP256K1_GE_CONST( 0x6d986544, 0x57ff52b8, 0xcf1b8126, 0x5b802a5b, 0xa97f9263, 0xb1e88044, 0x93351325, 0x91bc450a, 0x535c59f7, 0x325e5d2b, 0xc391fbe8, 0x3c12787c, 0x337e4a98, 0xe82a9011, 0x0123ba37, 0xdd769c7d ); /* random initial factor xn */ - rustsecp256k1_v0_9_2_scalar xn = SECP256K1_SCALAR_CONST( + rustsecp256k1_v0_10_0_scalar xn = SECP256K1_SCALAR_CONST( 0x649d4f77, 0xc4242df7, 0x7f2079c9, 0x14530327, 0xa31b876a, 0xd2d8ce2a, 0x2236d5c6, 0xd7b2029b ); /* expected xn * A (from sage) */ - rustsecp256k1_v0_9_2_ge expected_b = SECP256K1_GE_CONST( + rustsecp256k1_v0_10_0_ge expected_b = SECP256K1_GE_CONST( 0x23773684, 0x4d209dc7, 0x098a786f, 0x20d06fcd, 0x070a38bf, 0xc11ac651, 0x03004319, 0x1e2a8786, 0xed8c3b8e, 0xc06dd57b, 0xd06ea66e, 0x45492b0f, 0xb84e4e1b, 0xfb77e21f, 0x96baae2a, 0x63dec956 ); - rustsecp256k1_v0_9_2_gej b; - rustsecp256k1_v0_9_2_ecmult_const(&b, &a, &xn); + rustsecp256k1_v0_10_0_gej b; + rustsecp256k1_v0_10_0_ecmult_const(&b, &a, &xn); - CHECK(rustsecp256k1_v0_9_2_ge_is_valid_var(&a)); - ge_equals_gej(&expected_b, &b); + CHECK(rustsecp256k1_v0_10_0_ge_is_valid_var(&a)); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&b, &expected_b)); } static void ecmult_const_commutativity(void) { - rustsecp256k1_v0_9_2_scalar a; - rustsecp256k1_v0_9_2_scalar b; - rustsecp256k1_v0_9_2_gej res1; - rustsecp256k1_v0_9_2_gej res2; - rustsecp256k1_v0_9_2_ge mid1; - rustsecp256k1_v0_9_2_ge mid2; + rustsecp256k1_v0_10_0_scalar a; + rustsecp256k1_v0_10_0_scalar b; + rustsecp256k1_v0_10_0_gej res1; + rustsecp256k1_v0_10_0_gej res2; + rustsecp256k1_v0_10_0_ge mid1; + rustsecp256k1_v0_10_0_ge mid2; random_scalar_order_test(&a); random_scalar_order_test(&b); - rustsecp256k1_v0_9_2_ecmult_const(&res1, 
&rustsecp256k1_v0_9_2_ge_const_g, &a); - rustsecp256k1_v0_9_2_ecmult_const(&res2, &rustsecp256k1_v0_9_2_ge_const_g, &b); - rustsecp256k1_v0_9_2_ge_set_gej(&mid1, &res1); - rustsecp256k1_v0_9_2_ge_set_gej(&mid2, &res2); - rustsecp256k1_v0_9_2_ecmult_const(&res1, &mid1, &b); - rustsecp256k1_v0_9_2_ecmult_const(&res2, &mid2, &a); - rustsecp256k1_v0_9_2_ge_set_gej(&mid1, &res1); - rustsecp256k1_v0_9_2_ge_set_gej(&mid2, &res2); - ge_equals_ge(&mid1, &mid2); + rustsecp256k1_v0_10_0_ecmult_const(&res1, &rustsecp256k1_v0_10_0_ge_const_g, &a); + rustsecp256k1_v0_10_0_ecmult_const(&res2, &rustsecp256k1_v0_10_0_ge_const_g, &b); + rustsecp256k1_v0_10_0_ge_set_gej(&mid1, &res1); + rustsecp256k1_v0_10_0_ge_set_gej(&mid2, &res2); + rustsecp256k1_v0_10_0_ecmult_const(&res1, &mid1, &b); + rustsecp256k1_v0_10_0_ecmult_const(&res2, &mid2, &a); + rustsecp256k1_v0_10_0_ge_set_gej(&mid1, &res1); + rustsecp256k1_v0_10_0_ge_set_gej(&mid2, &res2); + CHECK(rustsecp256k1_v0_10_0_ge_eq_var(&mid1, &mid2)); } static void ecmult_const_mult_zero_one(void) { - rustsecp256k1_v0_9_2_scalar negone; - rustsecp256k1_v0_9_2_gej res1; - rustsecp256k1_v0_9_2_ge res2; - rustsecp256k1_v0_9_2_ge point; - rustsecp256k1_v0_9_2_scalar_negate(&negone, &rustsecp256k1_v0_9_2_scalar_one); + rustsecp256k1_v0_10_0_scalar s; + rustsecp256k1_v0_10_0_scalar negone; + rustsecp256k1_v0_10_0_gej res1; + rustsecp256k1_v0_10_0_ge res2; + rustsecp256k1_v0_10_0_ge point; + rustsecp256k1_v0_10_0_ge inf; + random_scalar_order_test(&s); + rustsecp256k1_v0_10_0_scalar_negate(&negone, &rustsecp256k1_v0_10_0_scalar_one); random_group_element_test(&point); - rustsecp256k1_v0_9_2_ecmult_const(&res1, &point, &rustsecp256k1_v0_9_2_scalar_zero); - rustsecp256k1_v0_9_2_ge_set_gej(&res2, &res1); - CHECK(rustsecp256k1_v0_9_2_ge_is_infinity(&res2)); - rustsecp256k1_v0_9_2_ecmult_const(&res1, &point, &rustsecp256k1_v0_9_2_scalar_one); - rustsecp256k1_v0_9_2_ge_set_gej(&res2, &res1); - ge_equals_ge(&res2, &point); - rustsecp256k1_v0_9_2_ecmult_const(&res1, &point, &negone); - rustsecp256k1_v0_9_2_gej_neg(&res1, &res1); - rustsecp256k1_v0_9_2_ge_set_gej(&res2, &res1); - ge_equals_ge(&res2, &point); + rustsecp256k1_v0_10_0_ge_set_infinity(&inf); + + /* 0*point */ + rustsecp256k1_v0_10_0_ecmult_const(&res1, &point, &rustsecp256k1_v0_10_0_scalar_zero); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&res1)); + + /* s*inf */ + rustsecp256k1_v0_10_0_ecmult_const(&res1, &inf, &s); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&res1)); + + /* 1*point */ + rustsecp256k1_v0_10_0_ecmult_const(&res1, &point, &rustsecp256k1_v0_10_0_scalar_one); + rustsecp256k1_v0_10_0_ge_set_gej(&res2, &res1); + CHECK(rustsecp256k1_v0_10_0_ge_eq_var(&res2, &point)); + + /* -1*point */ + rustsecp256k1_v0_10_0_ecmult_const(&res1, &point, &negone); + rustsecp256k1_v0_10_0_gej_neg(&res1, &res1); + rustsecp256k1_v0_10_0_ge_set_gej(&res2, &res1); + CHECK(rustsecp256k1_v0_10_0_ge_eq_var(&res2, &point)); +} + +static void ecmult_const_check_result(const rustsecp256k1_v0_10_0_ge *A, const rustsecp256k1_v0_10_0_scalar* q, const rustsecp256k1_v0_10_0_gej *res) { + rustsecp256k1_v0_10_0_gej pointj, res2j; + rustsecp256k1_v0_10_0_ge res2; + rustsecp256k1_v0_10_0_gej_set_ge(&pointj, A); + rustsecp256k1_v0_10_0_ecmult(&res2j, &pointj, q, &rustsecp256k1_v0_10_0_scalar_zero); + rustsecp256k1_v0_10_0_ge_set_gej(&res2, &res2j); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(res, &res2)); +} + +static void ecmult_const_edges(void) { + rustsecp256k1_v0_10_0_scalar q; + rustsecp256k1_v0_10_0_ge point; + 
rustsecp256k1_v0_10_0_gej res; + size_t i; + size_t cases = 1 + sizeof(scalars_near_split_bounds) / sizeof(scalars_near_split_bounds[0]); + + /* We are trying to reach the following edge cases (variables are defined as + * in ecmult_const_impl.h): + * 1. i = 0: s = 0 <=> q = -K + * 2. i > 0: v1, v2 large values + * <=> s1, s2 large values + * <=> s = scalars_near_split_bounds[i] + * <=> q = 2*scalars_near_split_bounds[i] - K + */ + for (i = 0; i < cases; ++i) { + rustsecp256k1_v0_10_0_scalar_negate(&q, &rustsecp256k1_v0_10_0_ecmult_const_K); + if (i > 0) { + rustsecp256k1_v0_10_0_scalar_add(&q, &q, &scalars_near_split_bounds[i - 1]); + rustsecp256k1_v0_10_0_scalar_add(&q, &q, &scalars_near_split_bounds[i - 1]); + } + random_group_element_test(&point); + rustsecp256k1_v0_10_0_ecmult_const(&res, &point, &q); + ecmult_const_check_result(&point, &q, &res); + } } static void ecmult_const_mult_xonly(void) { int i; - /* Test correspondence between rustsecp256k1_v0_9_2_ecmult_const and rustsecp256k1_v0_9_2_ecmult_const_xonly. */ + /* Test correspondence between rustsecp256k1_v0_10_0_ecmult_const and rustsecp256k1_v0_10_0_ecmult_const_xonly. */ for (i = 0; i < 2*COUNT; ++i) { - rustsecp256k1_v0_9_2_ge base; - rustsecp256k1_v0_9_2_gej basej, resj; - rustsecp256k1_v0_9_2_fe n, d, resx, v; - rustsecp256k1_v0_9_2_scalar q; + rustsecp256k1_v0_10_0_ge base; + rustsecp256k1_v0_10_0_gej basej, resj; + rustsecp256k1_v0_10_0_fe n, d, resx, v; + rustsecp256k1_v0_10_0_scalar q; int res; /* Random base point. */ random_group_element_test(&base); @@ -4543,72 +4545,73 @@ static void ecmult_const_mult_xonly(void) { /* If i is odd, n=d*base.x for random non-zero d */ if (i & 1) { random_fe_non_zero_test(&d); - rustsecp256k1_v0_9_2_fe_mul(&n, &base.x, &d); + rustsecp256k1_v0_10_0_fe_mul(&n, &base.x, &d); } else { n = base.x; } /* Perform x-only multiplication. */ - res = rustsecp256k1_v0_9_2_ecmult_const_xonly(&resx, &n, (i & 1) ? &d : NULL, &q, i & 2); + res = rustsecp256k1_v0_10_0_ecmult_const_xonly(&resx, &n, (i & 1) ? &d : NULL, &q, i & 2); CHECK(res); /* Perform normal multiplication. */ - rustsecp256k1_v0_9_2_gej_set_ge(&basej, &base); - rustsecp256k1_v0_9_2_ecmult(&resj, &basej, &q, NULL); + rustsecp256k1_v0_10_0_gej_set_ge(&basej, &base); + rustsecp256k1_v0_10_0_ecmult(&resj, &basej, &q, NULL); /* Check that resj's X coordinate corresponds with resx. */ - rustsecp256k1_v0_9_2_fe_sqr(&v, &resj.z); - rustsecp256k1_v0_9_2_fe_mul(&v, &v, &resx); + rustsecp256k1_v0_10_0_fe_sqr(&v, &resj.z); + rustsecp256k1_v0_10_0_fe_mul(&v, &v, &resx); CHECK(check_fe_equal(&v, &resj.x)); } - /* Test that rustsecp256k1_v0_9_2_ecmult_const_xonly correctly rejects X coordinates not on curve. */ + /* Test that rustsecp256k1_v0_10_0_ecmult_const_xonly correctly rejects X coordinates not on curve. */ for (i = 0; i < 2*COUNT; ++i) { - rustsecp256k1_v0_9_2_fe x, n, d, r; + rustsecp256k1_v0_10_0_fe x, n, d, r; int res; - rustsecp256k1_v0_9_2_scalar q; + rustsecp256k1_v0_10_0_scalar q; random_scalar_order_test(&q); /* Generate random X coordinate not on the curve. */ do { random_fe_test(&x); - } while (rustsecp256k1_v0_9_2_ge_x_on_curve_var(&x)); + } while (rustsecp256k1_v0_10_0_ge_x_on_curve_var(&x)); /* If i is odd, n=d*x for random non-zero d. */ if (i & 1) { random_fe_non_zero_test(&d); - rustsecp256k1_v0_9_2_fe_mul(&n, &x, &d); + rustsecp256k1_v0_10_0_fe_mul(&n, &x, &d); } else { n = x; } - res = rustsecp256k1_v0_9_2_ecmult_const_xonly(&r, &n, (i & 1) ? 
&d : NULL, &q, 0); + res = rustsecp256k1_v0_10_0_ecmult_const_xonly(&r, &n, (i & 1) ? &d : NULL, &q, 0); CHECK(res == 0); } } static void ecmult_const_chain_multiply(void) { /* Check known result (randomly generated test problem from sage) */ - const rustsecp256k1_v0_9_2_scalar scalar = SECP256K1_SCALAR_CONST( + const rustsecp256k1_v0_10_0_scalar scalar = SECP256K1_SCALAR_CONST( 0x4968d524, 0x2abf9b7a, 0x466abbcf, 0x34b11b6d, 0xcd83d307, 0x827bed62, 0x05fad0ce, 0x18fae63b ); - const rustsecp256k1_v0_9_2_gej expected_point = SECP256K1_GEJ_CONST( + const rustsecp256k1_v0_10_0_gej expected_point = SECP256K1_GEJ_CONST( 0x5494c15d, 0x32099706, 0xc2395f94, 0x348745fd, 0x757ce30e, 0x4e8c90fb, 0xa2bad184, 0xf883c69f, 0x5d195d20, 0xe191bf7f, 0x1be3e55f, 0x56a80196, 0x6071ad01, 0xf1462f66, 0xc997fa94, 0xdb858435 ); - rustsecp256k1_v0_9_2_gej point; - rustsecp256k1_v0_9_2_ge res; + rustsecp256k1_v0_10_0_gej point; + rustsecp256k1_v0_10_0_ge res; int i; - rustsecp256k1_v0_9_2_gej_set_ge(&point, &rustsecp256k1_v0_9_2_ge_const_g); + rustsecp256k1_v0_10_0_gej_set_ge(&point, &rustsecp256k1_v0_10_0_ge_const_g); for (i = 0; i < 100; ++i) { - rustsecp256k1_v0_9_2_ge tmp; - rustsecp256k1_v0_9_2_ge_set_gej(&tmp, &point); - rustsecp256k1_v0_9_2_ecmult_const(&point, &tmp, &scalar); + rustsecp256k1_v0_10_0_ge tmp; + rustsecp256k1_v0_10_0_ge_set_gej(&tmp, &point); + rustsecp256k1_v0_10_0_ecmult_const(&point, &tmp, &scalar); } - rustsecp256k1_v0_9_2_ge_set_gej(&res, &point); - ge_equals_gej(&res, &expected_point); + rustsecp256k1_v0_10_0_ge_set_gej(&res, &point); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&expected_point, &res)); } static void run_ecmult_const_tests(void) { ecmult_const_mult_zero_one(); + ecmult_const_edges(); ecmult_const_random_mult(); ecmult_const_commutativity(); ecmult_const_chain_multiply(); @@ -4616,18 +4619,18 @@ static void run_ecmult_const_tests(void) { } typedef struct { - rustsecp256k1_v0_9_2_scalar *sc; - rustsecp256k1_v0_9_2_ge *pt; + rustsecp256k1_v0_10_0_scalar *sc; + rustsecp256k1_v0_10_0_ge *pt; } ecmult_multi_data; -static int ecmult_multi_callback(rustsecp256k1_v0_9_2_scalar *sc, rustsecp256k1_v0_9_2_ge *pt, size_t idx, void *cbdata) { +static int ecmult_multi_callback(rustsecp256k1_v0_10_0_scalar *sc, rustsecp256k1_v0_10_0_ge *pt, size_t idx, void *cbdata) { ecmult_multi_data *data = (ecmult_multi_data*) cbdata; *sc = data->sc[idx]; *pt = data->pt[idx]; return 1; } -static int ecmult_multi_false_callback(rustsecp256k1_v0_9_2_scalar *sc, rustsecp256k1_v0_9_2_ge *pt, size_t idx, void *cbdata) { +static int ecmult_multi_false_callback(rustsecp256k1_v0_10_0_scalar *sc, rustsecp256k1_v0_10_0_ge *pt, size_t idx, void *cbdata) { (void)sc; (void)pt; (void)idx; @@ -4635,12 +4638,12 @@ static int ecmult_multi_false_callback(rustsecp256k1_v0_9_2_scalar *sc, rustsecp return 0; } -static void test_ecmult_multi(rustsecp256k1_v0_9_2_scratch *scratch, rustsecp256k1_v0_9_2_ecmult_multi_func ecmult_multi) { +static void test_ecmult_multi(rustsecp256k1_v0_10_0_scratch *scratch, rustsecp256k1_v0_10_0_ecmult_multi_func ecmult_multi) { int ncount; - rustsecp256k1_v0_9_2_scalar sc[32]; - rustsecp256k1_v0_9_2_ge pt[32]; - rustsecp256k1_v0_9_2_gej r; - rustsecp256k1_v0_9_2_gej r2; + rustsecp256k1_v0_10_0_scalar sc[32]; + rustsecp256k1_v0_10_0_ge pt[32]; + rustsecp256k1_v0_10_0_gej r; + rustsecp256k1_v0_10_0_gej r2; ecmult_multi_data data; data.sc = sc; @@ -4651,76 +4654,76 @@ static void test_ecmult_multi(rustsecp256k1_v0_9_2_scratch *scratch, rustsecp256 /* Check 1- and 2-point multiplies against 
ecmult */ for (ncount = 0; ncount < COUNT; ncount++) { - rustsecp256k1_v0_9_2_ge ptg; - rustsecp256k1_v0_9_2_gej ptgj; + rustsecp256k1_v0_10_0_ge ptg; + rustsecp256k1_v0_10_0_gej ptgj; random_scalar_order(&sc[0]); random_scalar_order(&sc[1]); random_group_element_test(&ptg); - rustsecp256k1_v0_9_2_gej_set_ge(&ptgj, &ptg); + rustsecp256k1_v0_10_0_gej_set_ge(&ptgj, &ptg); pt[0] = ptg; - pt[1] = rustsecp256k1_v0_9_2_ge_const_g; + pt[1] = rustsecp256k1_v0_10_0_ge_const_g; /* only G scalar */ - rustsecp256k1_v0_9_2_ecmult(&r2, &ptgj, &rustsecp256k1_v0_9_2_scalar_zero, &sc[0]); + rustsecp256k1_v0_10_0_ecmult(&r2, &ptgj, &rustsecp256k1_v0_10_0_scalar_zero, &sc[0]); CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &sc[0], ecmult_multi_callback, &data, 0)); - CHECK(rustsecp256k1_v0_9_2_gej_eq_var(&r, &r2)); + CHECK(rustsecp256k1_v0_10_0_gej_eq_var(&r, &r2)); /* 1-point */ - rustsecp256k1_v0_9_2_ecmult(&r2, &ptgj, &sc[0], &rustsecp256k1_v0_9_2_scalar_zero); - CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_callback, &data, 1)); - CHECK(rustsecp256k1_v0_9_2_gej_eq_var(&r, &r2)); + rustsecp256k1_v0_10_0_ecmult(&r2, &ptgj, &sc[0], &rustsecp256k1_v0_10_0_scalar_zero); + CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_callback, &data, 1)); + CHECK(rustsecp256k1_v0_10_0_gej_eq_var(&r, &r2)); /* Try to multiply 1 point, but callback returns false */ - CHECK(!ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_false_callback, &data, 1)); + CHECK(!ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_false_callback, &data, 1)); /* 2-point */ - rustsecp256k1_v0_9_2_ecmult(&r2, &ptgj, &sc[0], &sc[1]); - CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_callback, &data, 2)); - CHECK(rustsecp256k1_v0_9_2_gej_eq_var(&r, &r2)); + rustsecp256k1_v0_10_0_ecmult(&r2, &ptgj, &sc[0], &sc[1]); + CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_callback, &data, 2)); + CHECK(rustsecp256k1_v0_10_0_gej_eq_var(&r, &r2)); /* 2-point with G scalar */ - rustsecp256k1_v0_9_2_ecmult(&r2, &ptgj, &sc[0], &sc[1]); + rustsecp256k1_v0_10_0_ecmult(&r2, &ptgj, &sc[0], &sc[1]); CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &sc[1], ecmult_multi_callback, &data, 1)); - CHECK(rustsecp256k1_v0_9_2_gej_eq_var(&r, &r2)); + CHECK(rustsecp256k1_v0_10_0_gej_eq_var(&r, &r2)); } /* Check infinite outputs of various forms */ for (ncount = 0; ncount < COUNT; ncount++) { - rustsecp256k1_v0_9_2_ge ptg; + rustsecp256k1_v0_10_0_ge ptg; size_t i, j; size_t sizes[] = { 2, 10, 32 }; for (j = 0; j < 3; j++) { for (i = 0; i < 32; i++) { random_scalar_order(&sc[i]); - rustsecp256k1_v0_9_2_ge_set_infinity(&pt[i]); + rustsecp256k1_v0_10_0_ge_set_infinity(&pt[i]); } - CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_callback, &data, sizes[j])); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&r)); + CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_callback, &data, sizes[j])); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&r)); } for (j = 0; j < 3; j++) { for (i = 0; i < 32; i++) { random_group_element_test(&ptg); pt[i] = ptg; - rustsecp256k1_v0_9_2_scalar_set_int(&sc[i], 0); + rustsecp256k1_v0_10_0_scalar_set_int(&sc[i], 0); } - 
CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_callback, &data, sizes[j])); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&r)); + CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_callback, &data, sizes[j])); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&r)); } for (j = 0; j < 3; j++) { random_group_element_test(&ptg); for (i = 0; i < 16; i++) { random_scalar_order(&sc[2*i]); - rustsecp256k1_v0_9_2_scalar_negate(&sc[2*i + 1], &sc[2*i]); + rustsecp256k1_v0_10_0_scalar_negate(&sc[2*i + 1], &sc[2*i]); pt[2 * i] = ptg; pt[2 * i + 1] = ptg; } - CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_callback, &data, sizes[j])); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&r)); + CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_callback, &data, sizes[j])); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&r)); random_scalar_order(&sc[0]); for (i = 0; i < 16; i++) { @@ -4729,66 +4732,66 @@ static void test_ecmult_multi(rustsecp256k1_v0_9_2_scratch *scratch, rustsecp256 sc[2*i] = sc[0]; sc[2*i+1] = sc[0]; pt[2 * i] = ptg; - rustsecp256k1_v0_9_2_ge_neg(&pt[2*i+1], &pt[2*i]); + rustsecp256k1_v0_10_0_ge_neg(&pt[2*i+1], &pt[2*i]); } - CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_callback, &data, sizes[j])); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&r)); + CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_callback, &data, sizes[j])); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&r)); } random_group_element_test(&ptg); - rustsecp256k1_v0_9_2_scalar_set_int(&sc[0], 0); + rustsecp256k1_v0_10_0_scalar_set_int(&sc[0], 0); pt[0] = ptg; for (i = 1; i < 32; i++) { pt[i] = ptg; random_scalar_order(&sc[i]); - rustsecp256k1_v0_9_2_scalar_add(&sc[0], &sc[0], &sc[i]); - rustsecp256k1_v0_9_2_scalar_negate(&sc[i], &sc[i]); + rustsecp256k1_v0_10_0_scalar_add(&sc[0], &sc[0], &sc[i]); + rustsecp256k1_v0_10_0_scalar_negate(&sc[i], &sc[i]); } - CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_callback, &data, 32)); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&r)); + CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_callback, &data, 32)); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&r)); } /* Check random points, constant scalar */ for (ncount = 0; ncount < COUNT; ncount++) { size_t i; - rustsecp256k1_v0_9_2_gej_set_infinity(&r); + rustsecp256k1_v0_10_0_gej_set_infinity(&r); random_scalar_order(&sc[0]); for (i = 0; i < 20; i++) { - rustsecp256k1_v0_9_2_ge ptg; + rustsecp256k1_v0_10_0_ge ptg; sc[i] = sc[0]; random_group_element_test(&ptg); pt[i] = ptg; - rustsecp256k1_v0_9_2_gej_add_ge_var(&r, &r, &pt[i], NULL); + rustsecp256k1_v0_10_0_gej_add_ge_var(&r, &r, &pt[i], NULL); } - rustsecp256k1_v0_9_2_ecmult(&r2, &r, &sc[0], &rustsecp256k1_v0_9_2_scalar_zero); - CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_callback, &data, 20)); - CHECK(rustsecp256k1_v0_9_2_gej_eq_var(&r, &r2)); + rustsecp256k1_v0_10_0_ecmult(&r2, &r, &sc[0], &rustsecp256k1_v0_10_0_scalar_zero); + CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_callback, &data, 20)); + CHECK(rustsecp256k1_v0_10_0_gej_eq_var(&r, &r2)); } /* Check 
random scalars, constant point */ for (ncount = 0; ncount < COUNT; ncount++) { size_t i; - rustsecp256k1_v0_9_2_ge ptg; - rustsecp256k1_v0_9_2_gej p0j; - rustsecp256k1_v0_9_2_scalar rs; - rustsecp256k1_v0_9_2_scalar_set_int(&rs, 0); + rustsecp256k1_v0_10_0_ge ptg; + rustsecp256k1_v0_10_0_gej p0j; + rustsecp256k1_v0_10_0_scalar rs; + rustsecp256k1_v0_10_0_scalar_set_int(&rs, 0); random_group_element_test(&ptg); for (i = 0; i < 20; i++) { random_scalar_order(&sc[i]); pt[i] = ptg; - rustsecp256k1_v0_9_2_scalar_add(&rs, &rs, &sc[i]); + rustsecp256k1_v0_10_0_scalar_add(&rs, &rs, &sc[i]); } - rustsecp256k1_v0_9_2_gej_set_ge(&p0j, &pt[0]); - rustsecp256k1_v0_9_2_ecmult(&r2, &p0j, &rs, &rustsecp256k1_v0_9_2_scalar_zero); - CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_callback, &data, 20)); - CHECK(rustsecp256k1_v0_9_2_gej_eq_var(&r, &r2)); + rustsecp256k1_v0_10_0_gej_set_ge(&p0j, &pt[0]); + rustsecp256k1_v0_10_0_ecmult(&r2, &p0j, &rs, &rustsecp256k1_v0_10_0_scalar_zero); + CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_callback, &data, 20)); + CHECK(rustsecp256k1_v0_10_0_gej_eq_var(&r, &r2)); } /* Sanity check that zero scalars don't cause problems */ @@ -4797,60 +4800,60 @@ static void test_ecmult_multi(rustsecp256k1_v0_9_2_scratch *scratch, rustsecp256 random_group_element_test(&pt[ncount]); } - rustsecp256k1_v0_9_2_scalar_clear(&sc[0]); - CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_callback, &data, 20)); - rustsecp256k1_v0_9_2_scalar_clear(&sc[1]); - rustsecp256k1_v0_9_2_scalar_clear(&sc[2]); - rustsecp256k1_v0_9_2_scalar_clear(&sc[3]); - rustsecp256k1_v0_9_2_scalar_clear(&sc[4]); - CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_callback, &data, 6)); - CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_callback, &data, 5)); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&r)); + rustsecp256k1_v0_10_0_scalar_clear(&sc[0]); + CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_callback, &data, 20)); + rustsecp256k1_v0_10_0_scalar_clear(&sc[1]); + rustsecp256k1_v0_10_0_scalar_clear(&sc[2]); + rustsecp256k1_v0_10_0_scalar_clear(&sc[3]); + rustsecp256k1_v0_10_0_scalar_clear(&sc[4]); + CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_callback, &data, 6)); + CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_callback, &data, 5)); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&r)); /* Run through s0*(t0*P) + s1*(t1*P) exhaustively for many small values of s0, s1, t0, t1 */ { const size_t TOP = 8; size_t s0i, s1i; size_t t0i, t1i; - rustsecp256k1_v0_9_2_ge ptg; - rustsecp256k1_v0_9_2_gej ptgj; + rustsecp256k1_v0_10_0_ge ptg; + rustsecp256k1_v0_10_0_gej ptgj; random_group_element_test(&ptg); - rustsecp256k1_v0_9_2_gej_set_ge(&ptgj, &ptg); + rustsecp256k1_v0_10_0_gej_set_ge(&ptgj, &ptg); for(t0i = 0; t0i < TOP; t0i++) { for(t1i = 0; t1i < TOP; t1i++) { - rustsecp256k1_v0_9_2_gej t0p, t1p; - rustsecp256k1_v0_9_2_scalar t0, t1; + rustsecp256k1_v0_10_0_gej t0p, t1p; + rustsecp256k1_v0_10_0_scalar t0, t1; - rustsecp256k1_v0_9_2_scalar_set_int(&t0, (t0i + 1) / 2); - rustsecp256k1_v0_9_2_scalar_cond_negate(&t0, t0i & 1); - rustsecp256k1_v0_9_2_scalar_set_int(&t1, (t1i + 1) / 2); - 
rustsecp256k1_v0_9_2_scalar_cond_negate(&t1, t1i & 1); + rustsecp256k1_v0_10_0_scalar_set_int(&t0, (t0i + 1) / 2); + rustsecp256k1_v0_10_0_scalar_cond_negate(&t0, t0i & 1); + rustsecp256k1_v0_10_0_scalar_set_int(&t1, (t1i + 1) / 2); + rustsecp256k1_v0_10_0_scalar_cond_negate(&t1, t1i & 1); - rustsecp256k1_v0_9_2_ecmult(&t0p, &ptgj, &t0, &rustsecp256k1_v0_9_2_scalar_zero); - rustsecp256k1_v0_9_2_ecmult(&t1p, &ptgj, &t1, &rustsecp256k1_v0_9_2_scalar_zero); + rustsecp256k1_v0_10_0_ecmult(&t0p, &ptgj, &t0, &rustsecp256k1_v0_10_0_scalar_zero); + rustsecp256k1_v0_10_0_ecmult(&t1p, &ptgj, &t1, &rustsecp256k1_v0_10_0_scalar_zero); for(s0i = 0; s0i < TOP; s0i++) { for(s1i = 0; s1i < TOP; s1i++) { - rustsecp256k1_v0_9_2_scalar tmp1, tmp2; - rustsecp256k1_v0_9_2_gej expected, actual; + rustsecp256k1_v0_10_0_scalar tmp1, tmp2; + rustsecp256k1_v0_10_0_gej expected, actual; - rustsecp256k1_v0_9_2_ge_set_gej(&pt[0], &t0p); - rustsecp256k1_v0_9_2_ge_set_gej(&pt[1], &t1p); + rustsecp256k1_v0_10_0_ge_set_gej(&pt[0], &t0p); + rustsecp256k1_v0_10_0_ge_set_gej(&pt[1], &t1p); - rustsecp256k1_v0_9_2_scalar_set_int(&sc[0], (s0i + 1) / 2); - rustsecp256k1_v0_9_2_scalar_cond_negate(&sc[0], s0i & 1); - rustsecp256k1_v0_9_2_scalar_set_int(&sc[1], (s1i + 1) / 2); - rustsecp256k1_v0_9_2_scalar_cond_negate(&sc[1], s1i & 1); + rustsecp256k1_v0_10_0_scalar_set_int(&sc[0], (s0i + 1) / 2); + rustsecp256k1_v0_10_0_scalar_cond_negate(&sc[0], s0i & 1); + rustsecp256k1_v0_10_0_scalar_set_int(&sc[1], (s1i + 1) / 2); + rustsecp256k1_v0_10_0_scalar_cond_negate(&sc[1], s1i & 1); - rustsecp256k1_v0_9_2_scalar_mul(&tmp1, &t0, &sc[0]); - rustsecp256k1_v0_9_2_scalar_mul(&tmp2, &t1, &sc[1]); - rustsecp256k1_v0_9_2_scalar_add(&tmp1, &tmp1, &tmp2); + rustsecp256k1_v0_10_0_scalar_mul(&tmp1, &t0, &sc[0]); + rustsecp256k1_v0_10_0_scalar_mul(&tmp2, &t1, &sc[1]); + rustsecp256k1_v0_10_0_scalar_add(&tmp1, &tmp1, &tmp2); - rustsecp256k1_v0_9_2_ecmult(&expected, &ptgj, &tmp1, &rustsecp256k1_v0_9_2_scalar_zero); - CHECK(ecmult_multi(&CTX->error_callback, scratch, &actual, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_callback, &data, 2)); - CHECK(rustsecp256k1_v0_9_2_gej_eq_var(&actual, &expected)); + rustsecp256k1_v0_10_0_ecmult(&expected, &ptgj, &tmp1, &rustsecp256k1_v0_10_0_scalar_zero); + CHECK(ecmult_multi(&CTX->error_callback, scratch, &actual, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_callback, &data, 2)); + CHECK(rustsecp256k1_v0_10_0_gej_eq_var(&actual, &expected)); } } } @@ -4858,7 +4861,7 @@ static void test_ecmult_multi(rustsecp256k1_v0_9_2_scratch *scratch, rustsecp256 } } -static int test_ecmult_multi_random(rustsecp256k1_v0_9_2_scratch *scratch) { +static int test_ecmult_multi_random(rustsecp256k1_v0_10_0_scratch *scratch) { /* Large random test for ecmult_multi_* functions which exercises: * - Few or many inputs (0 up to 128, roughly exponentially distributed). * - Few or many 0*P or a*INF inputs (roughly uniformly distributed). @@ -4872,48 +4875,48 @@ static int test_ecmult_multi_random(rustsecp256k1_v0_9_2_scratch *scratch) { * scalars[0..filled-1] and gejs[0..filled-1] are the scalars and points * which form its normal inputs. */ int filled = 0; - rustsecp256k1_v0_9_2_scalar g_scalar = rustsecp256k1_v0_9_2_scalar_zero; - rustsecp256k1_v0_9_2_scalar scalars[128]; - rustsecp256k1_v0_9_2_gej gejs[128]; + rustsecp256k1_v0_10_0_scalar g_scalar = rustsecp256k1_v0_10_0_scalar_zero; + rustsecp256k1_v0_10_0_scalar scalars[128]; + rustsecp256k1_v0_10_0_gej gejs[128]; /* The expected result, and the computed result. 
*/ - rustsecp256k1_v0_9_2_gej expected, computed; + rustsecp256k1_v0_10_0_gej expected, computed; /* Temporaries. */ - rustsecp256k1_v0_9_2_scalar sc_tmp; - rustsecp256k1_v0_9_2_ge ge_tmp; + rustsecp256k1_v0_10_0_scalar sc_tmp; + rustsecp256k1_v0_10_0_ge ge_tmp; /* Variables needed for the actual input to ecmult_multi. */ - rustsecp256k1_v0_9_2_ge ges[128]; + rustsecp256k1_v0_10_0_ge ges[128]; ecmult_multi_data data; int i; /* Which multiplication function to use */ - int fn = rustsecp256k1_v0_9_2_testrand_int(3); - rustsecp256k1_v0_9_2_ecmult_multi_func ecmult_multi = fn == 0 ? rustsecp256k1_v0_9_2_ecmult_multi_var : - fn == 1 ? rustsecp256k1_v0_9_2_ecmult_strauss_batch_single : - rustsecp256k1_v0_9_2_ecmult_pippenger_batch_single; + int fn = rustsecp256k1_v0_10_0_testrand_int(3); + rustsecp256k1_v0_10_0_ecmult_multi_func ecmult_multi = fn == 0 ? rustsecp256k1_v0_10_0_ecmult_multi_var : + fn == 1 ? rustsecp256k1_v0_10_0_ecmult_strauss_batch_single : + rustsecp256k1_v0_10_0_ecmult_pippenger_batch_single; /* Simulate exponentially distributed num. */ - int num_bits = 2 + rustsecp256k1_v0_9_2_testrand_int(6); + int num_bits = 2 + rustsecp256k1_v0_10_0_testrand_int(6); /* Number of (scalar, point) inputs (excluding g). */ - int num = rustsecp256k1_v0_9_2_testrand_int((1 << num_bits) + 1); + int num = rustsecp256k1_v0_10_0_testrand_int((1 << num_bits) + 1); /* Number of those which are nonzero. */ - int num_nonzero = rustsecp256k1_v0_9_2_testrand_int(num + 1); + int num_nonzero = rustsecp256k1_v0_10_0_testrand_int(num + 1); /* Whether we're aiming to create an input with nonzero expected result. */ - int nonzero_result = rustsecp256k1_v0_9_2_testrand_bits(1); + int nonzero_result = rustsecp256k1_v0_10_0_testrand_bits(1); /* Whether we will provide nonzero g multiplicand. In some cases our hand * is forced here based on num_nonzero and nonzero_result. */ int g_nonzero = num_nonzero == 0 ? nonzero_result : num_nonzero == 1 && !nonzero_result ? 1 : - (int)rustsecp256k1_v0_9_2_testrand_bits(1); + (int)rustsecp256k1_v0_10_0_testrand_bits(1); /* Which g_scalar pointer to pass into ecmult_multi(). */ - const rustsecp256k1_v0_9_2_scalar* g_scalar_ptr = (g_nonzero || rustsecp256k1_v0_9_2_testrand_bits(1)) ? &g_scalar : NULL; + const rustsecp256k1_v0_10_0_scalar* g_scalar_ptr = (g_nonzero || rustsecp256k1_v0_10_0_testrand_bits(1)) ? &g_scalar : NULL; /* How many EC multiplications were performed in this function. */ int mults = 0; /* How many randomization steps to apply to the input list. */ - int rands = (int)rustsecp256k1_v0_9_2_testrand_bits(3); + int rands = (int)rustsecp256k1_v0_10_0_testrand_bits(3); if (rands > num_nonzero) rands = num_nonzero; - rustsecp256k1_v0_9_2_gej_set_infinity(&expected); - rustsecp256k1_v0_9_2_gej_set_infinity(&gejs[0]); - rustsecp256k1_v0_9_2_scalar_set_int(&scalars[0], 0); + rustsecp256k1_v0_10_0_gej_set_infinity(&expected); + rustsecp256k1_v0_10_0_gej_set_infinity(&gejs[0]); + rustsecp256k1_v0_10_0_scalar_set_int(&scalars[0], 0); if (g_nonzero) { /* If g_nonzero, set g_scalar to nonzero value r. */ @@ -4922,10 +4925,10 @@ static int test_ecmult_multi_random(rustsecp256k1_v0_9_2_scratch *scratch) { /* If expected=0 is desired, add a (a*r, -(1/a)*g) term to compensate. 
*/ CHECK(num_nonzero > filled); random_scalar_order_test(&sc_tmp); - rustsecp256k1_v0_9_2_scalar_mul(&scalars[filled], &sc_tmp, &g_scalar); - rustsecp256k1_v0_9_2_scalar_inverse_var(&sc_tmp, &sc_tmp); - rustsecp256k1_v0_9_2_scalar_negate(&sc_tmp, &sc_tmp); - rustsecp256k1_v0_9_2_ecmult_gen(&CTX->ecmult_gen_ctx, &gejs[filled], &sc_tmp); + rustsecp256k1_v0_10_0_scalar_mul(&scalars[filled], &sc_tmp, &g_scalar); + rustsecp256k1_v0_10_0_scalar_inverse_var(&sc_tmp, &sc_tmp); + rustsecp256k1_v0_10_0_scalar_negate(&sc_tmp, &sc_tmp); + rustsecp256k1_v0_10_0_ecmult_gen(&CTX->ecmult_gen_ctx, &gejs[filled], &sc_tmp); ++filled; ++mults; } @@ -4935,14 +4938,14 @@ static int test_ecmult_multi_random(rustsecp256k1_v0_9_2_scratch *scratch) { /* If a nonzero result is desired, and there is space, add a random nonzero term. */ random_scalar_order_test(&scalars[filled]); random_group_element_test(&ge_tmp); - rustsecp256k1_v0_9_2_gej_set_ge(&gejs[filled], &ge_tmp); + rustsecp256k1_v0_10_0_gej_set_ge(&gejs[filled], &ge_tmp); ++filled; } if (nonzero_result) { /* Compute the expected result using normal ecmult. */ CHECK(filled <= 1); - rustsecp256k1_v0_9_2_ecmult(&expected, &gejs[0], &scalars[0], &g_scalar); + rustsecp256k1_v0_10_0_ecmult(&expected, &gejs[0], &scalars[0], &g_scalar); mults += filled + g_nonzero; } @@ -4953,13 +4956,13 @@ static int test_ecmult_multi_random(rustsecp256k1_v0_9_2_scratch *scratch) { /* Add entries to scalars,gejs so that there are num of them. All the added entries * either have scalar=0 or point=infinity, so these do not change the expected result. */ while (filled < num) { - if (rustsecp256k1_v0_9_2_testrand_bits(1)) { - rustsecp256k1_v0_9_2_gej_set_infinity(&gejs[filled]); + if (rustsecp256k1_v0_10_0_testrand_bits(1)) { + rustsecp256k1_v0_10_0_gej_set_infinity(&gejs[filled]); random_scalar_order_test(&scalars[filled]); } else { - rustsecp256k1_v0_9_2_scalar_set_int(&scalars[filled], 0); + rustsecp256k1_v0_10_0_scalar_set_int(&scalars[filled], 0); random_group_element_test(&ge_tmp); - rustsecp256k1_v0_9_2_gej_set_ge(&gejs[filled], &ge_tmp); + rustsecp256k1_v0_10_0_gej_set_ge(&gejs[filled], &ge_tmp); } ++filled; } @@ -4969,13 +4972,13 @@ static int test_ecmult_multi_random(rustsecp256k1_v0_9_2_scratch *scratch) { * convert some of them to be both non-0-scalar and non-infinity-point. */ for (i = 0; i < rands; ++i) { int j; - rustsecp256k1_v0_9_2_scalar v, iv; + rustsecp256k1_v0_10_0_scalar v, iv; /* Shuffle the entries. */ for (j = 0; j < num_nonzero; ++j) { - int k = rustsecp256k1_v0_9_2_testrand_int(num_nonzero - j); + int k = rustsecp256k1_v0_10_0_testrand_int(num_nonzero - j); if (k != 0) { - rustsecp256k1_v0_9_2_gej gej = gejs[j]; - rustsecp256k1_v0_9_2_scalar sc = scalars[j]; + rustsecp256k1_v0_10_0_gej gej = gejs[j]; + rustsecp256k1_v0_10_0_scalar sc = scalars[j]; gejs[j] = gejs[j + k]; scalars[j] = scalars[j + k]; gejs[j + k] = gej; @@ -4985,26 +4988,26 @@ static int test_ecmult_multi_random(rustsecp256k1_v0_9_2_scratch *scratch) { /* Perturb all consecutive pairs of inputs: * a*P + b*Q -> (a+b)*P + b*(Q-P). 
*/ for (j = 0; j + 1 < num_nonzero; j += 2) { - rustsecp256k1_v0_9_2_gej gej; - rustsecp256k1_v0_9_2_scalar_add(&scalars[j], &scalars[j], &scalars[j+1]); - rustsecp256k1_v0_9_2_gej_neg(&gej, &gejs[j]); - rustsecp256k1_v0_9_2_gej_add_var(&gejs[j+1], &gejs[j+1], &gej, NULL); + rustsecp256k1_v0_10_0_gej gej; + rustsecp256k1_v0_10_0_scalar_add(&scalars[j], &scalars[j], &scalars[j+1]); + rustsecp256k1_v0_10_0_gej_neg(&gej, &gejs[j]); + rustsecp256k1_v0_10_0_gej_add_var(&gejs[j+1], &gejs[j+1], &gej, NULL); } /* Transform the last input: a*P -> (v*a) * ((1/v)*P). */ CHECK(num_nonzero >= 1); random_scalar_order_test(&v); - rustsecp256k1_v0_9_2_scalar_inverse(&iv, &v); - rustsecp256k1_v0_9_2_scalar_mul(&scalars[num_nonzero - 1], &scalars[num_nonzero - 1], &v); - rustsecp256k1_v0_9_2_ecmult(&gejs[num_nonzero - 1], &gejs[num_nonzero - 1], &iv, NULL); + rustsecp256k1_v0_10_0_scalar_inverse(&iv, &v); + rustsecp256k1_v0_10_0_scalar_mul(&scalars[num_nonzero - 1], &scalars[num_nonzero - 1], &v); + rustsecp256k1_v0_10_0_ecmult(&gejs[num_nonzero - 1], &gejs[num_nonzero - 1], &iv, NULL); ++mults; } /* Shuffle all entries (0..num-1). */ for (i = 0; i < num; ++i) { - int j = rustsecp256k1_v0_9_2_testrand_int(num - i); + int j = rustsecp256k1_v0_10_0_testrand_int(num - i); if (j != 0) { - rustsecp256k1_v0_9_2_gej gej = gejs[i]; - rustsecp256k1_v0_9_2_scalar sc = scalars[i]; + rustsecp256k1_v0_10_0_gej gej = gejs[i]; + rustsecp256k1_v0_10_0_scalar sc = scalars[i]; gejs[i] = gejs[i + j]; scalars[i] = scalars[i + j]; gejs[i + j] = gej; @@ -5013,23 +5016,23 @@ static int test_ecmult_multi_random(rustsecp256k1_v0_9_2_scratch *scratch) { } /* Compute affine versions of all inputs. */ - rustsecp256k1_v0_9_2_ge_set_all_gej_var(ges, gejs, filled); + rustsecp256k1_v0_10_0_ge_set_all_gej_var(ges, gejs, filled); /* Invoke ecmult_multi code. */ data.sc = scalars; data.pt = ges; CHECK(ecmult_multi(&CTX->error_callback, scratch, &computed, g_scalar_ptr, ecmult_multi_callback, &data, filled)); mults += num_nonzero + g_nonzero; /* Compare with expected result. 
*/ - CHECK(rustsecp256k1_v0_9_2_gej_eq_var(&computed, &expected)); + CHECK(rustsecp256k1_v0_10_0_gej_eq_var(&computed, &expected)); return mults; } -static void test_ecmult_multi_batch_single(rustsecp256k1_v0_9_2_ecmult_multi_func ecmult_multi) { - rustsecp256k1_v0_9_2_scalar sc; - rustsecp256k1_v0_9_2_ge pt; - rustsecp256k1_v0_9_2_gej r; +static void test_ecmult_multi_batch_single(rustsecp256k1_v0_10_0_ecmult_multi_func ecmult_multi) { + rustsecp256k1_v0_10_0_scalar sc; + rustsecp256k1_v0_10_0_ge pt; + rustsecp256k1_v0_10_0_gej r; ecmult_multi_data data; - rustsecp256k1_v0_9_2_scratch *scratch_empty; + rustsecp256k1_v0_10_0_scratch *scratch_empty; random_group_element_test(&pt); random_scalar_order(&sc); @@ -5037,23 +5040,23 @@ static void test_ecmult_multi_batch_single(rustsecp256k1_v0_9_2_ecmult_multi_fun data.pt = &pt; /* Try to multiply 1 point, but scratch space is empty.*/ - scratch_empty = rustsecp256k1_v0_9_2_scratch_create(&CTX->error_callback, 0); - CHECK(!ecmult_multi(&CTX->error_callback, scratch_empty, &r, &rustsecp256k1_v0_9_2_scalar_zero, ecmult_multi_callback, &data, 1)); - rustsecp256k1_v0_9_2_scratch_destroy(&CTX->error_callback, scratch_empty); + scratch_empty = rustsecp256k1_v0_10_0_scratch_create(&CTX->error_callback, 0); + CHECK(!ecmult_multi(&CTX->error_callback, scratch_empty, &r, &rustsecp256k1_v0_10_0_scalar_zero, ecmult_multi_callback, &data, 1)); + rustsecp256k1_v0_10_0_scratch_destroy(&CTX->error_callback, scratch_empty); } -static void test_rustsecp256k1_v0_9_2_pippenger_bucket_window_inv(void) { +static void test_rustsecp256k1_v0_10_0_pippenger_bucket_window_inv(void) { int i; - CHECK(rustsecp256k1_v0_9_2_pippenger_bucket_window_inv(0) == 0); + CHECK(rustsecp256k1_v0_10_0_pippenger_bucket_window_inv(0) == 0); for(i = 1; i <= PIPPENGER_MAX_BUCKET_WINDOW; i++) { /* Bucket_window of 8 is not used with endo */ if (i == 8) { continue; } - CHECK(rustsecp256k1_v0_9_2_pippenger_bucket_window(rustsecp256k1_v0_9_2_pippenger_bucket_window_inv(i)) == i); + CHECK(rustsecp256k1_v0_10_0_pippenger_bucket_window(rustsecp256k1_v0_10_0_pippenger_bucket_window_inv(i)) == i); if (i != PIPPENGER_MAX_BUCKET_WINDOW) { - CHECK(rustsecp256k1_v0_9_2_pippenger_bucket_window(rustsecp256k1_v0_9_2_pippenger_bucket_window_inv(i)+1) > i); + CHECK(rustsecp256k1_v0_10_0_pippenger_bucket_window(rustsecp256k1_v0_10_0_pippenger_bucket_window_inv(i)+1) > i); } } } @@ -5063,9 +5066,9 @@ static void test_rustsecp256k1_v0_9_2_pippenger_bucket_window_inv(void) { * for a given scratch space. 
*/ static void test_ecmult_multi_pippenger_max_points(void) { - size_t scratch_size = rustsecp256k1_v0_9_2_testrand_bits(8); - size_t max_size = rustsecp256k1_v0_9_2_pippenger_scratch_size(rustsecp256k1_v0_9_2_pippenger_bucket_window_inv(PIPPENGER_MAX_BUCKET_WINDOW-1)+512, 12); - rustsecp256k1_v0_9_2_scratch *scratch; + size_t scratch_size = rustsecp256k1_v0_10_0_testrand_bits(8); + size_t max_size = rustsecp256k1_v0_10_0_pippenger_scratch_size(rustsecp256k1_v0_10_0_pippenger_bucket_window_inv(PIPPENGER_MAX_BUCKET_WINDOW-1)+512, 12); + rustsecp256k1_v0_10_0_scratch *scratch; size_t n_points_supported; int bucket_window = 0; @@ -5073,24 +5076,24 @@ static void test_ecmult_multi_pippenger_max_points(void) { size_t i; size_t total_alloc; size_t checkpoint; - scratch = rustsecp256k1_v0_9_2_scratch_create(&CTX->error_callback, scratch_size); + scratch = rustsecp256k1_v0_10_0_scratch_create(&CTX->error_callback, scratch_size); CHECK(scratch != NULL); - checkpoint = rustsecp256k1_v0_9_2_scratch_checkpoint(&CTX->error_callback, scratch); - n_points_supported = rustsecp256k1_v0_9_2_pippenger_max_points(&CTX->error_callback, scratch); + checkpoint = rustsecp256k1_v0_10_0_scratch_checkpoint(&CTX->error_callback, scratch); + n_points_supported = rustsecp256k1_v0_10_0_pippenger_max_points(&CTX->error_callback, scratch); if (n_points_supported == 0) { - rustsecp256k1_v0_9_2_scratch_destroy(&CTX->error_callback, scratch); + rustsecp256k1_v0_10_0_scratch_destroy(&CTX->error_callback, scratch); continue; } - bucket_window = rustsecp256k1_v0_9_2_pippenger_bucket_window(n_points_supported); + bucket_window = rustsecp256k1_v0_10_0_pippenger_bucket_window(n_points_supported); /* allocate `total_alloc` bytes over `PIPPENGER_SCRATCH_OBJECTS` many allocations */ - total_alloc = rustsecp256k1_v0_9_2_pippenger_scratch_size(n_points_supported, bucket_window); + total_alloc = rustsecp256k1_v0_10_0_pippenger_scratch_size(n_points_supported, bucket_window); for (i = 0; i < PIPPENGER_SCRATCH_OBJECTS - 1; i++) { - CHECK(rustsecp256k1_v0_9_2_scratch_alloc(&CTX->error_callback, scratch, 1)); + CHECK(rustsecp256k1_v0_10_0_scratch_alloc(&CTX->error_callback, scratch, 1)); total_alloc--; } - CHECK(rustsecp256k1_v0_9_2_scratch_alloc(&CTX->error_callback, scratch, total_alloc)); - rustsecp256k1_v0_9_2_scratch_apply_checkpoint(&CTX->error_callback, scratch, checkpoint); - rustsecp256k1_v0_9_2_scratch_destroy(&CTX->error_callback, scratch); + CHECK(rustsecp256k1_v0_10_0_scratch_alloc(&CTX->error_callback, scratch, total_alloc)); + rustsecp256k1_v0_10_0_scratch_apply_checkpoint(&CTX->error_callback, scratch, checkpoint); + rustsecp256k1_v0_10_0_scratch_destroy(&CTX->error_callback, scratch); } CHECK(bucket_window == PIPPENGER_MAX_BUCKET_WINDOW); } @@ -5100,154 +5103,154 @@ static void test_ecmult_multi_batch_size_helper(void) { max_n_batch_points = 0; n = 1; - CHECK(rustsecp256k1_v0_9_2_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 0); + CHECK(rustsecp256k1_v0_10_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 0); max_n_batch_points = 1; n = 0; - CHECK(rustsecp256k1_v0_9_2_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_10_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 0); CHECK(n_batch_points == 0); max_n_batch_points = 2; n = 5; - CHECK(rustsecp256k1_v0_9_2_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, 
max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_10_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 3); CHECK(n_batch_points == 2); max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH; n = ECMULT_MAX_POINTS_PER_BATCH; - CHECK(rustsecp256k1_v0_9_2_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_10_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 1); CHECK(n_batch_points == ECMULT_MAX_POINTS_PER_BATCH); max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH + 1; n = ECMULT_MAX_POINTS_PER_BATCH + 1; - CHECK(rustsecp256k1_v0_9_2_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_10_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 2); CHECK(n_batch_points == ECMULT_MAX_POINTS_PER_BATCH/2 + 1); max_n_batch_points = 1; n = SIZE_MAX; - CHECK(rustsecp256k1_v0_9_2_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_10_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == SIZE_MAX); CHECK(n_batch_points == 1); max_n_batch_points = 2; n = SIZE_MAX; - CHECK(rustsecp256k1_v0_9_2_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_10_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == SIZE_MAX/2 + 1); CHECK(n_batch_points == 2); } /** - * Run rustsecp256k1_v0_9_2_ecmult_multi_var with num points and a scratch space restricted to + * Run rustsecp256k1_v0_10_0_ecmult_multi_var with num points and a scratch space restricted to * 1 <= i <= num points. 
*/ static void test_ecmult_multi_batching(void) { static const int n_points = 2*ECMULT_PIPPENGER_THRESHOLD; - rustsecp256k1_v0_9_2_scalar scG; - rustsecp256k1_v0_9_2_scalar *sc = (rustsecp256k1_v0_9_2_scalar *)checked_malloc(&CTX->error_callback, sizeof(rustsecp256k1_v0_9_2_scalar) * n_points); - rustsecp256k1_v0_9_2_ge *pt = (rustsecp256k1_v0_9_2_ge *)checked_malloc(&CTX->error_callback, sizeof(rustsecp256k1_v0_9_2_ge) * n_points); - rustsecp256k1_v0_9_2_gej r; - rustsecp256k1_v0_9_2_gej r2; + rustsecp256k1_v0_10_0_scalar scG; + rustsecp256k1_v0_10_0_scalar *sc = (rustsecp256k1_v0_10_0_scalar *)checked_malloc(&CTX->error_callback, sizeof(rustsecp256k1_v0_10_0_scalar) * n_points); + rustsecp256k1_v0_10_0_ge *pt = (rustsecp256k1_v0_10_0_ge *)checked_malloc(&CTX->error_callback, sizeof(rustsecp256k1_v0_10_0_ge) * n_points); + rustsecp256k1_v0_10_0_gej r; + rustsecp256k1_v0_10_0_gej r2; ecmult_multi_data data; int i; - rustsecp256k1_v0_9_2_scratch *scratch; + rustsecp256k1_v0_10_0_scratch *scratch; - rustsecp256k1_v0_9_2_gej_set_infinity(&r2); + rustsecp256k1_v0_10_0_gej_set_infinity(&r2); /* Get random scalars and group elements and compute result */ random_scalar_order(&scG); - rustsecp256k1_v0_9_2_ecmult(&r2, &r2, &rustsecp256k1_v0_9_2_scalar_zero, &scG); + rustsecp256k1_v0_10_0_ecmult(&r2, &r2, &rustsecp256k1_v0_10_0_scalar_zero, &scG); for(i = 0; i < n_points; i++) { - rustsecp256k1_v0_9_2_ge ptg; - rustsecp256k1_v0_9_2_gej ptgj; + rustsecp256k1_v0_10_0_ge ptg; + rustsecp256k1_v0_10_0_gej ptgj; random_group_element_test(&ptg); - rustsecp256k1_v0_9_2_gej_set_ge(&ptgj, &ptg); + rustsecp256k1_v0_10_0_gej_set_ge(&ptgj, &ptg); pt[i] = ptg; random_scalar_order(&sc[i]); - rustsecp256k1_v0_9_2_ecmult(&ptgj, &ptgj, &sc[i], NULL); - rustsecp256k1_v0_9_2_gej_add_var(&r2, &r2, &ptgj, NULL); + rustsecp256k1_v0_10_0_ecmult(&ptgj, &ptgj, &sc[i], NULL); + rustsecp256k1_v0_10_0_gej_add_var(&r2, &r2, &ptgj, NULL); } data.sc = sc; data.pt = pt; - rustsecp256k1_v0_9_2_gej_neg(&r2, &r2); + rustsecp256k1_v0_10_0_gej_neg(&r2, &r2); /* Test with empty scratch space. It should compute the correct result using * ecmult_mult_simple algorithm which doesn't require a scratch space. */ - scratch = rustsecp256k1_v0_9_2_scratch_create(&CTX->error_callback, 0); - CHECK(rustsecp256k1_v0_9_2_ecmult_multi_var(&CTX->error_callback, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); - rustsecp256k1_v0_9_2_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&r)); - rustsecp256k1_v0_9_2_scratch_destroy(&CTX->error_callback, scratch); + scratch = rustsecp256k1_v0_10_0_scratch_create(&CTX->error_callback, 0); + CHECK(rustsecp256k1_v0_10_0_ecmult_multi_var(&CTX->error_callback, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + rustsecp256k1_v0_10_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&r)); + rustsecp256k1_v0_10_0_scratch_destroy(&CTX->error_callback, scratch); /* Test with space for 1 point in pippenger. That's not enough because * ecmult_multi selects strauss which requires more memory. It should * therefore select the simple algorithm. 
*/ - scratch = rustsecp256k1_v0_9_2_scratch_create(&CTX->error_callback, rustsecp256k1_v0_9_2_pippenger_scratch_size(1, 1) + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); - CHECK(rustsecp256k1_v0_9_2_ecmult_multi_var(&CTX->error_callback, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); - rustsecp256k1_v0_9_2_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&r)); - rustsecp256k1_v0_9_2_scratch_destroy(&CTX->error_callback, scratch); + scratch = rustsecp256k1_v0_10_0_scratch_create(&CTX->error_callback, rustsecp256k1_v0_10_0_pippenger_scratch_size(1, 1) + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); + CHECK(rustsecp256k1_v0_10_0_ecmult_multi_var(&CTX->error_callback, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + rustsecp256k1_v0_10_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&r)); + rustsecp256k1_v0_10_0_scratch_destroy(&CTX->error_callback, scratch); for(i = 1; i <= n_points; i++) { if (i > ECMULT_PIPPENGER_THRESHOLD) { - int bucket_window = rustsecp256k1_v0_9_2_pippenger_bucket_window(i); - size_t scratch_size = rustsecp256k1_v0_9_2_pippenger_scratch_size(i, bucket_window); - scratch = rustsecp256k1_v0_9_2_scratch_create(&CTX->error_callback, scratch_size + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); + int bucket_window = rustsecp256k1_v0_10_0_pippenger_bucket_window(i); + size_t scratch_size = rustsecp256k1_v0_10_0_pippenger_scratch_size(i, bucket_window); + scratch = rustsecp256k1_v0_10_0_scratch_create(&CTX->error_callback, scratch_size + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); } else { - size_t scratch_size = rustsecp256k1_v0_9_2_strauss_scratch_size(i); - scratch = rustsecp256k1_v0_9_2_scratch_create(&CTX->error_callback, scratch_size + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); + size_t scratch_size = rustsecp256k1_v0_10_0_strauss_scratch_size(i); + scratch = rustsecp256k1_v0_10_0_scratch_create(&CTX->error_callback, scratch_size + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); } - CHECK(rustsecp256k1_v0_9_2_ecmult_multi_var(&CTX->error_callback, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); - rustsecp256k1_v0_9_2_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&r)); - rustsecp256k1_v0_9_2_scratch_destroy(&CTX->error_callback, scratch); + CHECK(rustsecp256k1_v0_10_0_ecmult_multi_var(&CTX->error_callback, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + rustsecp256k1_v0_10_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&r)); + rustsecp256k1_v0_10_0_scratch_destroy(&CTX->error_callback, scratch); } free(sc); free(pt); } static void run_ecmult_multi_tests(void) { - rustsecp256k1_v0_9_2_scratch *scratch; + rustsecp256k1_v0_10_0_scratch *scratch; int64_t todo = (int64_t)320 * COUNT; - test_rustsecp256k1_v0_9_2_pippenger_bucket_window_inv(); + test_rustsecp256k1_v0_10_0_pippenger_bucket_window_inv(); test_ecmult_multi_pippenger_max_points(); - scratch = rustsecp256k1_v0_9_2_scratch_create(&CTX->error_callback, 819200); - test_ecmult_multi(scratch, rustsecp256k1_v0_9_2_ecmult_multi_var); - test_ecmult_multi(NULL, rustsecp256k1_v0_9_2_ecmult_multi_var); - test_ecmult_multi(scratch, rustsecp256k1_v0_9_2_ecmult_pippenger_batch_single); - test_ecmult_multi_batch_single(rustsecp256k1_v0_9_2_ecmult_pippenger_batch_single); - test_ecmult_multi(scratch, rustsecp256k1_v0_9_2_ecmult_strauss_batch_single); - test_ecmult_multi_batch_single(rustsecp256k1_v0_9_2_ecmult_strauss_batch_single); + scratch = rustsecp256k1_v0_10_0_scratch_create(&CTX->error_callback, 
819200); + test_ecmult_multi(scratch, rustsecp256k1_v0_10_0_ecmult_multi_var); + test_ecmult_multi(NULL, rustsecp256k1_v0_10_0_ecmult_multi_var); + test_ecmult_multi(scratch, rustsecp256k1_v0_10_0_ecmult_pippenger_batch_single); + test_ecmult_multi_batch_single(rustsecp256k1_v0_10_0_ecmult_pippenger_batch_single); + test_ecmult_multi(scratch, rustsecp256k1_v0_10_0_ecmult_strauss_batch_single); + test_ecmult_multi_batch_single(rustsecp256k1_v0_10_0_ecmult_strauss_batch_single); while (todo > 0) { todo -= test_ecmult_multi_random(scratch); } - rustsecp256k1_v0_9_2_scratch_destroy(&CTX->error_callback, scratch); + rustsecp256k1_v0_10_0_scratch_destroy(&CTX->error_callback, scratch); /* Run test_ecmult_multi with space for exactly one point */ - scratch = rustsecp256k1_v0_9_2_scratch_create(&CTX->error_callback, rustsecp256k1_v0_9_2_strauss_scratch_size(1) + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); - test_ecmult_multi(scratch, rustsecp256k1_v0_9_2_ecmult_multi_var); - rustsecp256k1_v0_9_2_scratch_destroy(&CTX->error_callback, scratch); + scratch = rustsecp256k1_v0_10_0_scratch_create(&CTX->error_callback, rustsecp256k1_v0_10_0_strauss_scratch_size(1) + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); + test_ecmult_multi(scratch, rustsecp256k1_v0_10_0_ecmult_multi_var); + rustsecp256k1_v0_10_0_scratch_destroy(&CTX->error_callback, scratch); test_ecmult_multi_batch_size_helper(); test_ecmult_multi_batching(); } -static void test_wnaf(const rustsecp256k1_v0_9_2_scalar *number, int w) { - rustsecp256k1_v0_9_2_scalar x, two, t; +static void test_wnaf(const rustsecp256k1_v0_10_0_scalar *number, int w) { + rustsecp256k1_v0_10_0_scalar x, two, t; int wnaf[256]; int zeroes = -1; int i; int bits; - rustsecp256k1_v0_9_2_scalar_set_int(&x, 0); - rustsecp256k1_v0_9_2_scalar_set_int(&two, 2); - bits = rustsecp256k1_v0_9_2_ecmult_wnaf(wnaf, 256, number, w); + rustsecp256k1_v0_10_0_scalar_set_int(&x, 0); + rustsecp256k1_v0_10_0_scalar_set_int(&two, 2); + bits = rustsecp256k1_v0_10_0_ecmult_wnaf(wnaf, 256, number, w); CHECK(bits <= 256); for (i = bits-1; i >= 0; i--) { int v = wnaf[i]; - rustsecp256k1_v0_9_2_scalar_mul(&x, &x, &two); + rustsecp256k1_v0_10_0_scalar_mul(&x, &x, &two); if (v) { CHECK(zeroes == -1 || zeroes >= w-1); /* check that distance between non-zero elements is at least w-1 */ zeroes=0; @@ -5259,104 +5262,48 @@ static void test_wnaf(const rustsecp256k1_v0_9_2_scalar *number, int w) { zeroes++; } if (v >= 0) { - rustsecp256k1_v0_9_2_scalar_set_int(&t, v); + rustsecp256k1_v0_10_0_scalar_set_int(&t, v); } else { - rustsecp256k1_v0_9_2_scalar_set_int(&t, -v); - rustsecp256k1_v0_9_2_scalar_negate(&t, &t); + rustsecp256k1_v0_10_0_scalar_set_int(&t, -v); + rustsecp256k1_v0_10_0_scalar_negate(&t, &t); } - rustsecp256k1_v0_9_2_scalar_add(&x, &x, &t); - } - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&x, number)); /* check that wnaf represents number */ -} - -static void test_constant_wnaf_negate(const rustsecp256k1_v0_9_2_scalar *number) { - rustsecp256k1_v0_9_2_scalar neg1 = *number; - rustsecp256k1_v0_9_2_scalar neg2 = *number; - int sign1 = 1; - int sign2 = 1; - - if (!rustsecp256k1_v0_9_2_scalar_get_bits(&neg1, 0, 1)) { - rustsecp256k1_v0_9_2_scalar_negate(&neg1, &neg1); - sign1 = -1; + rustsecp256k1_v0_10_0_scalar_add(&x, &x, &t); } - sign2 = rustsecp256k1_v0_9_2_scalar_cond_negate(&neg2, rustsecp256k1_v0_9_2_scalar_is_even(&neg2)); - CHECK(sign1 == sign2); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&neg1, &neg2)); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&x, number)); /* check that wnaf represents number */ } -static void 
test_constant_wnaf(const rustsecp256k1_v0_9_2_scalar *number, int w) { - rustsecp256k1_v0_9_2_scalar x, shift; +static void test_fixed_wnaf(const rustsecp256k1_v0_10_0_scalar *number, int w) { + rustsecp256k1_v0_10_0_scalar x, shift; int wnaf[256] = {0}; int i; int skew; - int bits = 256; - rustsecp256k1_v0_9_2_scalar num = *number; - rustsecp256k1_v0_9_2_scalar scalar_skew; + rustsecp256k1_v0_10_0_scalar num, unused; - rustsecp256k1_v0_9_2_scalar_set_int(&x, 0); - rustsecp256k1_v0_9_2_scalar_set_int(&shift, 1 << w); - for (i = 0; i < 16; ++i) { - rustsecp256k1_v0_9_2_scalar_shr_int(&num, 8); - } - bits = 128; - skew = rustsecp256k1_v0_9_2_wnaf_const(wnaf, &num, w, bits); - - for (i = WNAF_SIZE_BITS(bits, w); i >= 0; --i) { - rustsecp256k1_v0_9_2_scalar t; - int v = wnaf[i]; - CHECK(v != 0); /* check nonzero */ - CHECK(v & 1); /* check parity */ - CHECK(v > -(1 << w)); /* check range above */ - CHECK(v < (1 << w)); /* check range below */ - - rustsecp256k1_v0_9_2_scalar_mul(&x, &x, &shift); - if (v >= 0) { - rustsecp256k1_v0_9_2_scalar_set_int(&t, v); - } else { - rustsecp256k1_v0_9_2_scalar_set_int(&t, -v); - rustsecp256k1_v0_9_2_scalar_negate(&t, &t); - } - rustsecp256k1_v0_9_2_scalar_add(&x, &x, &t); - } - /* Skew num because when encoding numbers as odd we use an offset */ - rustsecp256k1_v0_9_2_scalar_set_int(&scalar_skew, skew); - rustsecp256k1_v0_9_2_scalar_add(&num, &num, &scalar_skew); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&x, &num)); -} - -static void test_fixed_wnaf(const rustsecp256k1_v0_9_2_scalar *number, int w) { - rustsecp256k1_v0_9_2_scalar x, shift; - int wnaf[256] = {0}; - int i; - int skew; - rustsecp256k1_v0_9_2_scalar num = *number; - - rustsecp256k1_v0_9_2_scalar_set_int(&x, 0); - rustsecp256k1_v0_9_2_scalar_set_int(&shift, 1 << w); - for (i = 0; i < 16; ++i) { - rustsecp256k1_v0_9_2_scalar_shr_int(&num, 8); - } - skew = rustsecp256k1_v0_9_2_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_10_0_scalar_set_int(&x, 0); + rustsecp256k1_v0_10_0_scalar_set_int(&shift, 1 << w); + /* Make num a 128-bit scalar. 
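 * (scalar_split_128 writes the low 128 bits of 'number' into num and the high
 * 128 bits into 'unused', which the test simply discards.)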
*/ + rustsecp256k1_v0_10_0_scalar_split_128(&num, &unused, number); + skew = rustsecp256k1_v0_10_0_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 0; --i) { - rustsecp256k1_v0_9_2_scalar t; + rustsecp256k1_v0_10_0_scalar t; int v = wnaf[i]; CHECK(v == 0 || v & 1); /* check parity */ CHECK(v > -(1 << w)); /* check range above */ CHECK(v < (1 << w)); /* check range below */ - rustsecp256k1_v0_9_2_scalar_mul(&x, &x, &shift); + rustsecp256k1_v0_10_0_scalar_mul(&x, &x, &shift); if (v >= 0) { - rustsecp256k1_v0_9_2_scalar_set_int(&t, v); + rustsecp256k1_v0_10_0_scalar_set_int(&t, v); } else { - rustsecp256k1_v0_9_2_scalar_set_int(&t, -v); - rustsecp256k1_v0_9_2_scalar_negate(&t, &t); + rustsecp256k1_v0_10_0_scalar_set_int(&t, -v); + rustsecp256k1_v0_10_0_scalar_negate(&t, &t); } - rustsecp256k1_v0_9_2_scalar_add(&x, &x, &t); + rustsecp256k1_v0_10_0_scalar_add(&x, &x, &t); } /* If skew is 1 then add 1 to num */ - rustsecp256k1_v0_9_2_scalar_cadd_bit(&num, 0, skew == 1); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&x, &num)); + rustsecp256k1_v0_10_0_scalar_cadd_bit(&num, 0, skew == 1); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&x, &num)); } /* Checks that the first 8 elements of wnaf are equal to wnaf_expected and the @@ -5376,18 +5323,18 @@ static void test_fixed_wnaf_small(void) { int wnaf[256] = {0}; int i; int skew; - rustsecp256k1_v0_9_2_scalar num; + rustsecp256k1_v0_10_0_scalar num; - rustsecp256k1_v0_9_2_scalar_set_int(&num, 0); - skew = rustsecp256k1_v0_9_2_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_10_0_scalar_set_int(&num, 0); + skew = rustsecp256k1_v0_10_0_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 0; --i) { int v = wnaf[i]; CHECK(v == 0); } CHECK(skew == 0); - rustsecp256k1_v0_9_2_scalar_set_int(&num, 1); - skew = rustsecp256k1_v0_9_2_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_10_0_scalar_set_int(&num, 1); + skew = rustsecp256k1_v0_10_0_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 1; --i) { int v = wnaf[i]; CHECK(v == 0); @@ -5397,29 +5344,29 @@ static void test_fixed_wnaf_small(void) { { int wnaf_expected[8] = { 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf }; - rustsecp256k1_v0_9_2_scalar_set_int(&num, 0xffffffff); - skew = rustsecp256k1_v0_9_2_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_10_0_scalar_set_int(&num, 0xffffffff); + skew = rustsecp256k1_v0_10_0_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } { int wnaf_expected[8] = { -1, -1, -1, -1, -1, -1, -1, 0xf }; - rustsecp256k1_v0_9_2_scalar_set_int(&num, 0xeeeeeeee); - skew = rustsecp256k1_v0_9_2_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_10_0_scalar_set_int(&num, 0xeeeeeeee); + skew = rustsecp256k1_v0_10_0_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 1); } { int wnaf_expected[8] = { 1, 0, 1, 0, 1, 0, 1, 0 }; - rustsecp256k1_v0_9_2_scalar_set_int(&num, 0x01010101); - skew = rustsecp256k1_v0_9_2_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_10_0_scalar_set_int(&num, 0x01010101); + skew = rustsecp256k1_v0_10_0_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } { int wnaf_expected[8] = { -0xf, 0, 0xf, -0xf, 0, 0xf, 1, 0 }; - rustsecp256k1_v0_9_2_scalar_set_int(&num, 0x01ef1ef1); - skew = rustsecp256k1_v0_9_2_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_10_0_scalar_set_int(&num, 0x01ef1ef1); + skew = rustsecp256k1_v0_10_0_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } @@ -5427,32 +5374,7 @@ static 
void test_fixed_wnaf_small(void) { static void run_wnaf(void) { int i; - rustsecp256k1_v0_9_2_scalar n = {{0}}; - - test_constant_wnaf(&n, 4); - /* Sanity check: 1 and 2 are the smallest odd and even numbers and should - * have easier-to-diagnose failure modes */ - n.d[0] = 1; - test_constant_wnaf(&n, 4); - n.d[0] = 2; - test_constant_wnaf(&n, 4); - /* Test -1, because it's a special case in wnaf_const */ - n = rustsecp256k1_v0_9_2_scalar_one; - rustsecp256k1_v0_9_2_scalar_negate(&n, &n); - test_constant_wnaf(&n, 4); - - /* Test -2, which may not lead to overflows in wnaf_const */ - rustsecp256k1_v0_9_2_scalar_add(&n, &rustsecp256k1_v0_9_2_scalar_one, &rustsecp256k1_v0_9_2_scalar_one); - rustsecp256k1_v0_9_2_scalar_negate(&n, &n); - test_constant_wnaf(&n, 4); - - /* Test (1/2) - 1 = 1/-2 and 1/2 = (1/-2) + 1 - as corner cases of negation handling in wnaf_const */ - rustsecp256k1_v0_9_2_scalar_inverse(&n, &n); - test_constant_wnaf(&n, 4); - - rustsecp256k1_v0_9_2_scalar_add(&n, &n, &rustsecp256k1_v0_9_2_scalar_one); - test_constant_wnaf(&n, 4); + rustsecp256k1_v0_10_0_scalar n; /* Test 0 for fixed wnaf */ test_fixed_wnaf_small(); @@ -5460,54 +5382,52 @@ static void run_wnaf(void) { for (i = 0; i < COUNT; i++) { random_scalar_order(&n); test_wnaf(&n, 4+(i%10)); - test_constant_wnaf_negate(&n); - test_constant_wnaf(&n, 4 + (i % 10)); test_fixed_wnaf(&n, 4 + (i % 10)); } - rustsecp256k1_v0_9_2_scalar_set_int(&n, 0); - CHECK(rustsecp256k1_v0_9_2_scalar_cond_negate(&n, 1) == -1); - CHECK(rustsecp256k1_v0_9_2_scalar_is_zero(&n)); - CHECK(rustsecp256k1_v0_9_2_scalar_cond_negate(&n, 0) == 1); - CHECK(rustsecp256k1_v0_9_2_scalar_is_zero(&n)); + rustsecp256k1_v0_10_0_scalar_set_int(&n, 0); + CHECK(rustsecp256k1_v0_10_0_scalar_cond_negate(&n, 1) == -1); + CHECK(rustsecp256k1_v0_10_0_scalar_is_zero(&n)); + CHECK(rustsecp256k1_v0_10_0_scalar_cond_negate(&n, 0) == 1); + CHECK(rustsecp256k1_v0_10_0_scalar_is_zero(&n)); } -static int test_ecmult_accumulate_cb(rustsecp256k1_v0_9_2_scalar* sc, rustsecp256k1_v0_9_2_ge* pt, size_t idx, void* data) { - const rustsecp256k1_v0_9_2_scalar* indata = (const rustsecp256k1_v0_9_2_scalar*)data; +static int test_ecmult_accumulate_cb(rustsecp256k1_v0_10_0_scalar* sc, rustsecp256k1_v0_10_0_ge* pt, size_t idx, void* data) { + const rustsecp256k1_v0_10_0_scalar* indata = (const rustsecp256k1_v0_10_0_scalar*)data; *sc = *indata; - *pt = rustsecp256k1_v0_9_2_ge_const_g; + *pt = rustsecp256k1_v0_10_0_ge_const_g; CHECK(idx == 0); return 1; } -static void test_ecmult_accumulate(rustsecp256k1_v0_9_2_sha256* acc, const rustsecp256k1_v0_9_2_scalar* x, rustsecp256k1_v0_9_2_scratch* scratch) { +static void test_ecmult_accumulate(rustsecp256k1_v0_10_0_sha256* acc, const rustsecp256k1_v0_10_0_scalar* x, rustsecp256k1_v0_10_0_scratch* scratch) { /* Compute x*G in 6 different ways, serialize it uncompressed, and feed it into acc. 
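 * The six paths: ecmult_gen; ecmult with x applied to the point G; ecmult with
 * x applied through the dedicated G-scalar argument; ecmult_multi_var with only
 * a G scalar; ecmult_multi_var receiving (x, G) from a callback; and
 * ecmult_const. All must agree on the affine result.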
*/ - rustsecp256k1_v0_9_2_gej rj1, rj2, rj3, rj4, rj5, rj6, gj, infj; - rustsecp256k1_v0_9_2_ge r; + rustsecp256k1_v0_10_0_gej rj1, rj2, rj3, rj4, rj5, rj6, gj, infj; + rustsecp256k1_v0_10_0_ge r; unsigned char bytes[65]; size_t size = 65; - rustsecp256k1_v0_9_2_gej_set_ge(&gj, &rustsecp256k1_v0_9_2_ge_const_g); - rustsecp256k1_v0_9_2_gej_set_infinity(&infj); - rustsecp256k1_v0_9_2_ecmult_gen(&CTX->ecmult_gen_ctx, &rj1, x); - rustsecp256k1_v0_9_2_ecmult(&rj2, &gj, x, &rustsecp256k1_v0_9_2_scalar_zero); - rustsecp256k1_v0_9_2_ecmult(&rj3, &infj, &rustsecp256k1_v0_9_2_scalar_zero, x); - rustsecp256k1_v0_9_2_ecmult_multi_var(NULL, scratch, &rj4, x, NULL, NULL, 0); - rustsecp256k1_v0_9_2_ecmult_multi_var(NULL, scratch, &rj5, &rustsecp256k1_v0_9_2_scalar_zero, test_ecmult_accumulate_cb, (void*)x, 1); - rustsecp256k1_v0_9_2_ecmult_const(&rj6, &rustsecp256k1_v0_9_2_ge_const_g, x); - rustsecp256k1_v0_9_2_ge_set_gej_var(&r, &rj1); - ge_equals_gej(&r, &rj2); - ge_equals_gej(&r, &rj3); - ge_equals_gej(&r, &rj4); - ge_equals_gej(&r, &rj5); - ge_equals_gej(&r, &rj6); - if (rustsecp256k1_v0_9_2_ge_is_infinity(&r)) { + rustsecp256k1_v0_10_0_gej_set_ge(&gj, &rustsecp256k1_v0_10_0_ge_const_g); + rustsecp256k1_v0_10_0_gej_set_infinity(&infj); + rustsecp256k1_v0_10_0_ecmult_gen(&CTX->ecmult_gen_ctx, &rj1, x); + rustsecp256k1_v0_10_0_ecmult(&rj2, &gj, x, &rustsecp256k1_v0_10_0_scalar_zero); + rustsecp256k1_v0_10_0_ecmult(&rj3, &infj, &rustsecp256k1_v0_10_0_scalar_zero, x); + rustsecp256k1_v0_10_0_ecmult_multi_var(NULL, scratch, &rj4, x, NULL, NULL, 0); + rustsecp256k1_v0_10_0_ecmult_multi_var(NULL, scratch, &rj5, &rustsecp256k1_v0_10_0_scalar_zero, test_ecmult_accumulate_cb, (void*)x, 1); + rustsecp256k1_v0_10_0_ecmult_const(&rj6, &rustsecp256k1_v0_10_0_ge_const_g, x); + rustsecp256k1_v0_10_0_ge_set_gej_var(&r, &rj1); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&rj2, &r)); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&rj3, &r)); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&rj4, &r)); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&rj5, &r)); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&rj6, &r)); + if (rustsecp256k1_v0_10_0_ge_is_infinity(&r)) { /* Store infinity as 0x00 */ const unsigned char zerobyte[1] = {0}; - rustsecp256k1_v0_9_2_sha256_write(acc, zerobyte, 1); + rustsecp256k1_v0_10_0_sha256_write(acc, zerobyte, 1); } else { /* Store other points using their uncompressed serialization. */ - rustsecp256k1_v0_9_2_eckey_pubkey_serialize(&r, bytes, &size, 0); + rustsecp256k1_v0_10_0_eckey_pubkey_serialize(&r, bytes, &size, 0); CHECK(size == 65); - rustsecp256k1_v0_9_2_sha256_write(acc, bytes, size); + rustsecp256k1_v0_10_0_sha256_write(acc, bytes, size); } } @@ -5520,11 +5440,11 @@ static void test_ecmult_constants_2bit(void) { * - For j in 1..255 (only odd values): * - Key (j*2^i) mod order */ - rustsecp256k1_v0_9_2_scalar x; - rustsecp256k1_v0_9_2_sha256 acc; + rustsecp256k1_v0_10_0_scalar x; + rustsecp256k1_v0_10_0_sha256 acc; unsigned char b32[32]; int i, j; - rustsecp256k1_v0_9_2_scratch_space *scratch = rustsecp256k1_v0_9_2_scratch_space_create(CTX, 65536); + rustsecp256k1_v0_10_0_scratch_space *scratch = rustsecp256k1_v0_10_0_scratch_space_create(CTX, 65536); /* Expected hash of all the computed points; created with an independent * implementation. 
*/ @@ -5534,25 +5454,25 @@ static void test_ecmult_constants_2bit(void) { 0x3a, 0x75, 0x87, 0x60, 0x1a, 0xf9, 0x63, 0x60, 0xd0, 0xcb, 0x1f, 0xaa, 0x85, 0x9a, 0xb7, 0xb4 }; - rustsecp256k1_v0_9_2_sha256_initialize(&acc); + rustsecp256k1_v0_10_0_sha256_initialize(&acc); for (i = 0; i <= 36; ++i) { - rustsecp256k1_v0_9_2_scalar_set_int(&x, i); + rustsecp256k1_v0_10_0_scalar_set_int(&x, i); test_ecmult_accumulate(&acc, &x, scratch); - rustsecp256k1_v0_9_2_scalar_negate(&x, &x); + rustsecp256k1_v0_10_0_scalar_negate(&x, &x); test_ecmult_accumulate(&acc, &x, scratch); }; for (i = 0; i < 256; ++i) { for (j = 1; j < 256; j += 2) { int k; - rustsecp256k1_v0_9_2_scalar_set_int(&x, j); - for (k = 0; k < i; ++k) rustsecp256k1_v0_9_2_scalar_add(&x, &x, &x); + rustsecp256k1_v0_10_0_scalar_set_int(&x, j); + for (k = 0; k < i; ++k) rustsecp256k1_v0_10_0_scalar_add(&x, &x, &x); test_ecmult_accumulate(&acc, &x, scratch); } } - rustsecp256k1_v0_9_2_sha256_finalize(&acc, b32); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(b32, expected32, 32) == 0); + rustsecp256k1_v0_10_0_sha256_finalize(&acc, b32); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(b32, expected32, 32) == 0); - rustsecp256k1_v0_9_2_scratch_space_destroy(CTX, scratch); + rustsecp256k1_v0_10_0_scratch_space_destroy(CTX, scratch); } static void test_ecmult_constants_sha(uint32_t prefix, size_t iter, const unsigned char* expected32) { @@ -5563,39 +5483,39 @@ static void test_ecmult_constants_sha(uint32_t prefix, size_t iter, const unsign * - For i in range(iter): * - Key SHA256(LE32(prefix) || LE16(i)) */ - rustsecp256k1_v0_9_2_scalar x; - rustsecp256k1_v0_9_2_sha256 acc; + rustsecp256k1_v0_10_0_scalar x; + rustsecp256k1_v0_10_0_sha256 acc; unsigned char b32[32]; unsigned char inp[6]; size_t i; - rustsecp256k1_v0_9_2_scratch_space *scratch = rustsecp256k1_v0_9_2_scratch_space_create(CTX, 65536); + rustsecp256k1_v0_10_0_scratch_space *scratch = rustsecp256k1_v0_10_0_scratch_space_create(CTX, 65536); inp[0] = prefix & 0xFF; inp[1] = (prefix >> 8) & 0xFF; inp[2] = (prefix >> 16) & 0xFF; inp[3] = (prefix >> 24) & 0xFF; - rustsecp256k1_v0_9_2_sha256_initialize(&acc); - rustsecp256k1_v0_9_2_scalar_set_int(&x, 0); + rustsecp256k1_v0_10_0_sha256_initialize(&acc); + rustsecp256k1_v0_10_0_scalar_set_int(&x, 0); test_ecmult_accumulate(&acc, &x, scratch); - rustsecp256k1_v0_9_2_scalar_set_int(&x, 1); + rustsecp256k1_v0_10_0_scalar_set_int(&x, 1); test_ecmult_accumulate(&acc, &x, scratch); - rustsecp256k1_v0_9_2_scalar_negate(&x, &x); + rustsecp256k1_v0_10_0_scalar_negate(&x, &x); test_ecmult_accumulate(&acc, &x, scratch); for (i = 0; i < iter; ++i) { - rustsecp256k1_v0_9_2_sha256 gen; + rustsecp256k1_v0_10_0_sha256 gen; inp[4] = i & 0xff; inp[5] = (i >> 8) & 0xff; - rustsecp256k1_v0_9_2_sha256_initialize(&gen); - rustsecp256k1_v0_9_2_sha256_write(&gen, inp, sizeof(inp)); - rustsecp256k1_v0_9_2_sha256_finalize(&gen, b32); - rustsecp256k1_v0_9_2_scalar_set_b32(&x, b32, NULL); + rustsecp256k1_v0_10_0_sha256_initialize(&gen); + rustsecp256k1_v0_10_0_sha256_write(&gen, inp, sizeof(inp)); + rustsecp256k1_v0_10_0_sha256_finalize(&gen, b32); + rustsecp256k1_v0_10_0_scalar_set_b32(&x, b32, NULL); test_ecmult_accumulate(&acc, &x, scratch); } - rustsecp256k1_v0_9_2_sha256_finalize(&acc, b32); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(b32, expected32, 32) == 0); + rustsecp256k1_v0_10_0_sha256_finalize(&acc, b32); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(b32, expected32, 32) == 0); - rustsecp256k1_v0_9_2_scratch_space_destroy(CTX, scratch); + 
rustsecp256k1_v0_10_0_scratch_space_destroy(CTX, scratch); } static void run_ecmult_constants(void) { @@ -5634,36 +5554,36 @@ static void run_ecmult_constants(void) { static void test_ecmult_gen_blind(void) { /* Test ecmult_gen() blinding and confirm that the blinding changes, the affine points match, and the z's don't match. */ - rustsecp256k1_v0_9_2_scalar key; - rustsecp256k1_v0_9_2_scalar b; + rustsecp256k1_v0_10_0_scalar key; + rustsecp256k1_v0_10_0_scalar b; unsigned char seed32[32]; - rustsecp256k1_v0_9_2_gej pgej; - rustsecp256k1_v0_9_2_gej pgej2; - rustsecp256k1_v0_9_2_gej i; - rustsecp256k1_v0_9_2_ge pge; + rustsecp256k1_v0_10_0_gej pgej; + rustsecp256k1_v0_10_0_gej pgej2; + rustsecp256k1_v0_10_0_gej i; + rustsecp256k1_v0_10_0_ge pge; random_scalar_order_test(&key); - rustsecp256k1_v0_9_2_ecmult_gen(&CTX->ecmult_gen_ctx, &pgej, &key); - rustsecp256k1_v0_9_2_testrand256(seed32); + rustsecp256k1_v0_10_0_ecmult_gen(&CTX->ecmult_gen_ctx, &pgej, &key); + rustsecp256k1_v0_10_0_testrand256(seed32); b = CTX->ecmult_gen_ctx.blind; i = CTX->ecmult_gen_ctx.initial; - rustsecp256k1_v0_9_2_ecmult_gen_blind(&CTX->ecmult_gen_ctx, seed32); - CHECK(!rustsecp256k1_v0_9_2_scalar_eq(&b, &CTX->ecmult_gen_ctx.blind)); - rustsecp256k1_v0_9_2_ecmult_gen(&CTX->ecmult_gen_ctx, &pgej2, &key); + rustsecp256k1_v0_10_0_ecmult_gen_blind(&CTX->ecmult_gen_ctx, seed32); + CHECK(!rustsecp256k1_v0_10_0_scalar_eq(&b, &CTX->ecmult_gen_ctx.blind)); + rustsecp256k1_v0_10_0_ecmult_gen(&CTX->ecmult_gen_ctx, &pgej2, &key); CHECK(!gej_xyz_equals_gej(&pgej, &pgej2)); CHECK(!gej_xyz_equals_gej(&i, &CTX->ecmult_gen_ctx.initial)); - rustsecp256k1_v0_9_2_ge_set_gej(&pge, &pgej); - ge_equals_gej(&pge, &pgej2); + rustsecp256k1_v0_10_0_ge_set_gej(&pge, &pgej); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&pgej2, &pge)); } static void test_ecmult_gen_blind_reset(void) { /* Test ecmult_gen() blinding reset and confirm that the blinding is consistent. 
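 * (A NULL seed resets the blinding to its deterministic default, so two
 * consecutive resets must leave identical 'blind' and 'initial' values.)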
*/ - rustsecp256k1_v0_9_2_scalar b; - rustsecp256k1_v0_9_2_gej initial; - rustsecp256k1_v0_9_2_ecmult_gen_blind(&CTX->ecmult_gen_ctx, 0); + rustsecp256k1_v0_10_0_scalar b; + rustsecp256k1_v0_10_0_gej initial; + rustsecp256k1_v0_10_0_ecmult_gen_blind(&CTX->ecmult_gen_ctx, 0); b = CTX->ecmult_gen_ctx.blind; initial = CTX->ecmult_gen_ctx.initial; - rustsecp256k1_v0_9_2_ecmult_gen_blind(&CTX->ecmult_gen_ctx, 0); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&b, &CTX->ecmult_gen_ctx.blind)); + rustsecp256k1_v0_10_0_ecmult_gen_blind(&CTX->ecmult_gen_ctx, 0); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&b, &CTX->ecmult_gen_ctx.blind)); CHECK(gej_xyz_equals_gej(&initial, &CTX->ecmult_gen_ctx.initial)); } @@ -5676,46 +5596,46 @@ static void run_ecmult_gen_blind(void) { } /***** ENDOMORPHISH TESTS *****/ -static void test_scalar_split(const rustsecp256k1_v0_9_2_scalar* full) { - rustsecp256k1_v0_9_2_scalar s, s1, slam; +static void test_scalar_split(const rustsecp256k1_v0_10_0_scalar* full) { + rustsecp256k1_v0_10_0_scalar s, s1, slam; const unsigned char zero[32] = {0}; unsigned char tmp[32]; - rustsecp256k1_v0_9_2_scalar_split_lambda(&s1, &slam, full); + rustsecp256k1_v0_10_0_scalar_split_lambda(&s1, &slam, full); /* check slam*lambda + s1 == full */ - rustsecp256k1_v0_9_2_scalar_mul(&s, &rustsecp256k1_v0_9_2_const_lambda, &slam); - rustsecp256k1_v0_9_2_scalar_add(&s, &s, &s1); - CHECK(rustsecp256k1_v0_9_2_scalar_eq(&s, full)); + rustsecp256k1_v0_10_0_scalar_mul(&s, &rustsecp256k1_v0_10_0_const_lambda, &slam); + rustsecp256k1_v0_10_0_scalar_add(&s, &s, &s1); + CHECK(rustsecp256k1_v0_10_0_scalar_eq(&s, full)); /* check that both are <= 128 bits in size */ - if (rustsecp256k1_v0_9_2_scalar_is_high(&s1)) { - rustsecp256k1_v0_9_2_scalar_negate(&s1, &s1); + if (rustsecp256k1_v0_10_0_scalar_is_high(&s1)) { + rustsecp256k1_v0_10_0_scalar_negate(&s1, &s1); } - if (rustsecp256k1_v0_9_2_scalar_is_high(&slam)) { - rustsecp256k1_v0_9_2_scalar_negate(&slam, &slam); + if (rustsecp256k1_v0_10_0_scalar_is_high(&slam)) { + rustsecp256k1_v0_10_0_scalar_negate(&slam, &slam); } - rustsecp256k1_v0_9_2_scalar_get_b32(tmp, &s1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zero, tmp, 16) == 0); - rustsecp256k1_v0_9_2_scalar_get_b32(tmp, &slam); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zero, tmp, 16) == 0); + rustsecp256k1_v0_10_0_scalar_get_b32(tmp, &s1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zero, tmp, 16) == 0); + rustsecp256k1_v0_10_0_scalar_get_b32(tmp, &slam); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zero, tmp, 16) == 0); } static void run_endomorphism_tests(void) { unsigned i; - static rustsecp256k1_v0_9_2_scalar s; - test_scalar_split(&rustsecp256k1_v0_9_2_scalar_zero); - test_scalar_split(&rustsecp256k1_v0_9_2_scalar_one); - rustsecp256k1_v0_9_2_scalar_negate(&s,&rustsecp256k1_v0_9_2_scalar_one); + static rustsecp256k1_v0_10_0_scalar s; + test_scalar_split(&rustsecp256k1_v0_10_0_scalar_zero); + test_scalar_split(&rustsecp256k1_v0_10_0_scalar_one); + rustsecp256k1_v0_10_0_scalar_negate(&s,&rustsecp256k1_v0_10_0_scalar_one); test_scalar_split(&s); - test_scalar_split(&rustsecp256k1_v0_9_2_const_lambda); - rustsecp256k1_v0_9_2_scalar_add(&s, &rustsecp256k1_v0_9_2_const_lambda, &rustsecp256k1_v0_9_2_scalar_one); + test_scalar_split(&rustsecp256k1_v0_10_0_const_lambda); + rustsecp256k1_v0_10_0_scalar_add(&s, &rustsecp256k1_v0_10_0_const_lambda, &rustsecp256k1_v0_10_0_scalar_one); test_scalar_split(&s); for (i = 0; i < 100U * COUNT; ++i) { - rustsecp256k1_v0_9_2_scalar full; + rustsecp256k1_v0_10_0_scalar full; 
random_scalar_order_test(&full); test_scalar_split(&full); } @@ -5726,12 +5646,10 @@ static void run_endomorphism_tests(void) { static void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvalid) { unsigned char pubkeyc[65]; - rustsecp256k1_v0_9_2_pubkey pubkey; - rustsecp256k1_v0_9_2_ge ge; + rustsecp256k1_v0_10_0_pubkey pubkey; + rustsecp256k1_v0_10_0_ge ge; size_t pubkeyclen; - int32_t ecount; - ecount = 0; - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, counting_illegal_callback_fn, &ecount); + for (pubkeyclen = 3; pubkeyclen <= 65; pubkeyclen++) { /* Smaller sizes are tested exhaustively elsewhere. */ int32_t i; @@ -5756,47 +5674,41 @@ static void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, in size_t outl; memset(&pubkey, 0, sizeof(pubkey)); SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pubkey, pubkeyc, pubkeyclen) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pubkey, pubkeyc, pubkeyclen) == 1); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); outl = 65; SECP256K1_CHECKMEM_UNDEFINE(pubkeyo, 65); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(CTX, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1); SECP256K1_CHECKMEM_CHECK(pubkeyo, outl); CHECK(outl == 33); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkeyo[1], &pubkeyc[1], 32) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkeyo[1], &pubkeyc[1], 32) == 0); CHECK((pubkeyclen != 33) || (pubkeyo[0] == pubkeyc[0])); if (ypass) { /* This test isn't always done because we decode with alternative signs, so the y won't match. */ CHECK(pubkeyo[0] == ysign); - CHECK(rustsecp256k1_v0_9_2_pubkey_load(CTX, &ge, &pubkey) == 1); + CHECK(rustsecp256k1_v0_10_0_pubkey_load(CTX, &ge, &pubkey) == 1); memset(&pubkey, 0, sizeof(pubkey)); SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - rustsecp256k1_v0_9_2_pubkey_save(&pubkey, &ge); + rustsecp256k1_v0_10_0_pubkey_save(&pubkey, &ge); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); outl = 65; SECP256K1_CHECKMEM_UNDEFINE(pubkeyo, 65); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(CTX, pubkeyo, &outl, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, pubkeyo, &outl, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); SECP256K1_CHECKMEM_CHECK(pubkeyo, outl); CHECK(outl == 65); CHECK(pubkeyo[0] == 4); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkeyo[1], input, 64) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkeyo[1], input, 64) == 0); } - CHECK(ecount == 0); } else { /* These cases must fail to parse. 
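A recurring change in this hunk, and throughout this vendored update, is that the old pattern of installing counting_illegal_callback_fn and asserting on a local ecount is folded into the CHECK_ILLEGAL/CHECK_ILLEGAL_VOID macros from the new src/testutil.h. A plausible sketch of the contract those macros enforce (hedged: the vendored definition may differ in detail; counting_callback_fn and the callback setter are names that appear elsewhere in this diff):

/* Sketch of the CHECK_ILLEGAL contract, not the vendored macro. */
#define SKETCH_CHECK_ILLEGAL(ctx, expr) do { \
    int32_t _calls = 0; \
    rustsecp256k1_v0_10_0_context_set_illegal_callback((ctx), counting_callback_fn, &_calls); \
    CHECK((expr) == 0);  /* the call must fail... */ \
    CHECK(_calls == 1);  /* ...and hit the illegal-arg callback exactly once */ \
    rustsecp256k1_v0_10_0_context_set_illegal_callback((ctx), NULL, NULL); \
} while (0)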
*/ memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pubkey, pubkeyc, pubkeyclen) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pubkey, pubkeyc, pubkeyclen) == 0); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_pubkey_load(CTX, &ge, &pubkey) == 0); - CHECK(ecount == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_pubkey_load(CTX, &ge, &pubkey)); } } } - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, NULL, NULL); } static void run_ec_pubkey_parse_test(void) { @@ -5979,142 +5891,99 @@ static void run_ec_pubkey_parse_test(void) { 0xB8, 0x00 }; unsigned char sout[65]; - unsigned char shortkey[2]; - rustsecp256k1_v0_9_2_ge ge; - rustsecp256k1_v0_9_2_pubkey pubkey; + unsigned char shortkey[2] = { 0 }; + rustsecp256k1_v0_10_0_ge ge; + rustsecp256k1_v0_10_0_pubkey pubkey; size_t len; int32_t i; - int32_t ecount; - int32_t ecount2; - ecount = 0; + /* Nothing should be reading this far into pubkeyc. */ SECP256K1_CHECKMEM_UNDEFINE(&pubkeyc[65], 1); - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, counting_illegal_callback_fn, &ecount); /* Zero length claimed, fail, zeroize, no illegal arg error. */ memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; SECP256K1_CHECKMEM_UNDEFINE(shortkey, 2); SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pubkey, shortkey, 0) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pubkey, shortkey, 0) == 0); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_pubkey_load(CTX, &ge, &pubkey) == 0); - CHECK(ecount == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_pubkey_load(CTX, &ge, &pubkey)); /* Length one claimed, fail, zeroize, no illegal arg error. */ for (i = 0; i < 256 ; i++) { memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; shortkey[0] = i; SECP256K1_CHECKMEM_UNDEFINE(&shortkey[1], 1); SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pubkey, shortkey, 1) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pubkey, shortkey, 1) == 0); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_pubkey_load(CTX, &ge, &pubkey) == 0); - CHECK(ecount == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_pubkey_load(CTX, &ge, &pubkey)); } /* Length two claimed, fail, zeroize, no illegal arg error. */ for (i = 0; i < 65536 ; i++) { memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; shortkey[0] = i & 255; shortkey[1] = i >> 8; SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pubkey, shortkey, 2) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pubkey, shortkey, 2) == 0); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_pubkey_load(CTX, &ge, &pubkey) == 0); - CHECK(ecount == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_pubkey_load(CTX, &ge, &pubkey)); } memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); /* 33 bytes claimed on otherwise valid input starting with 0x04, fail, zeroize output, no illegal arg error. 
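The length cases being driven here follow directly from SEC1: the type byte fixes the total encoding length, so any mismatch between the two must fail to parse. As a hypothetical helper (not a library function) that makes the table explicit:

/* 0x02/0x03 -> 33 bytes (compressed; the byte encodes the parity of y)
 * 0x04      -> 65 bytes (uncompressed)
 * 0x06/0x07 -> 65 bytes (X9.62 hybrid; parity must also match y) */
static size_t sketch_expected_pubkey_len(unsigned char type_byte) {
    switch (type_byte) {
        case 0x02: case 0x03: return 33;
        case 0x04: case 0x06: case 0x07: return 65;
        default: return 0; /* no valid encoding has this type byte */
    }
}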
*/ - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pubkey, pubkeyc, 33) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pubkey, pubkeyc, 33) == 0); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_pubkey_load(CTX, &ge, &pubkey) == 0); - CHECK(ecount == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_pubkey_load(CTX, &ge, &pubkey)); /* NULL pubkey, illegal arg error. Pubkey isn't rewritten before this step, since it's NULL into the parser. */ - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, NULL, pubkeyc, 65) == 0); - CHECK(ecount == 2); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, NULL, pubkeyc, 65)); /* NULL input string. Illegal arg and zeroize output. */ memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pubkey, NULL, 65) == 0); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pubkey, NULL, 65)); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_pubkey_load(CTX, &ge, &pubkey) == 0); - CHECK(ecount == 2); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_pubkey_load(CTX, &ge, &pubkey)); /* 64 bytes claimed on input starting with 0x04, fail, zeroize output, no illegal arg error. */ memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pubkey, pubkeyc, 64) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pubkey, pubkeyc, 64) == 0); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_pubkey_load(CTX, &ge, &pubkey) == 0); - CHECK(ecount == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_pubkey_load(CTX, &ge, &pubkey)); /* 66 bytes claimed, fail, zeroize output, no illegal arg error. */ memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pubkey, pubkeyc, 66) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pubkey, pubkeyc, 66) == 0); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_pubkey_load(CTX, &ge, &pubkey) == 0); - CHECK(ecount == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_pubkey_load(CTX, &ge, &pubkey)); /* Valid parse. */ memset(&pubkey, 0, sizeof(pubkey)); - ecount = 0; SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pubkey, pubkeyc, 65) == 1); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(rustsecp256k1_v0_9_2_context_static, &pubkey, pubkeyc, 65) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pubkey, pubkeyc, 65) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(rustsecp256k1_v0_10_0_context_static, &pubkey, pubkeyc, 65) == 1); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); SECP256K1_CHECKMEM_UNDEFINE(&ge, sizeof(ge)); - CHECK(rustsecp256k1_v0_9_2_pubkey_load(CTX, &ge, &pubkey) == 1); + CHECK(rustsecp256k1_v0_10_0_pubkey_load(CTX, &ge, &pubkey) == 1); SECP256K1_CHECKMEM_CHECK(&ge.x, sizeof(ge.x)); SECP256K1_CHECKMEM_CHECK(&ge.y, sizeof(ge.y)); SECP256K1_CHECKMEM_CHECK(&ge.infinity, sizeof(ge.infinity)); - ge_equals_ge(&rustsecp256k1_v0_9_2_ge_const_g, &ge); - CHECK(ecount == 0); - /* rustsecp256k1_v0_9_2_ec_pubkey_serialize illegal args. 
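The serialize checks that follow rely on two parts of the documented contract: *outputlen is in/out (buffer capacity on entry, bytes written on success), and it is reset to 0 whenever the call rejects its arguments, which is exactly what the CHECK(len == 0) assertions below verify. A correct-usage sketch under the same harness assumptions as above:

static void sketch_serialize_contract(const rustsecp256k1_v0_10_0_pubkey *pk) {
    unsigned char buf[65];
    size_t len = sizeof(buf); /* in: capacity; out: bytes written */
    CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, buf, &len, pk, SECP256K1_EC_COMPRESSED) == 1);
    CHECK(len == 33); /* compressed output is always 33 bytes */
}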
*/ - ecount = 0; + CHECK(rustsecp256k1_v0_10_0_ge_eq_var(&ge, &rustsecp256k1_v0_10_0_ge_const_g)); + /* rustsecp256k1_v0_10_0_ec_pubkey_serialize illegal args. */ len = 65; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(CTX, NULL, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); - CHECK(ecount == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, NULL, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED)); CHECK(len == 0); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(CTX, sout, NULL, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); - CHECK(ecount == 2); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, sout, NULL, &pubkey, SECP256K1_EC_UNCOMPRESSED)); len = 65; SECP256K1_CHECKMEM_UNDEFINE(sout, 65); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(CTX, sout, &len, NULL, SECP256K1_EC_UNCOMPRESSED) == 0); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, sout, &len, NULL, SECP256K1_EC_UNCOMPRESSED)); SECP256K1_CHECKMEM_CHECK(sout, 65); - CHECK(ecount == 3); CHECK(len == 0); len = 65; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(CTX, sout, &len, &pubkey, ~0) == 0); - CHECK(ecount == 4); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, sout, &len, &pubkey, ~0)); CHECK(len == 0); len = 65; SECP256K1_CHECKMEM_UNDEFINE(sout, 65); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(CTX, sout, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, sout, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); SECP256K1_CHECKMEM_CHECK(sout, 65); - CHECK(ecount == 4); CHECK(len == 65); /* Multiple illegal args. Should still set arg error only once. */ - ecount = 0; - ecount2 = 11; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, NULL, NULL, 65) == 0); - CHECK(ecount == 1); - /* Does the illegal arg callback actually change the behavior? */ - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, uncounting_illegal_callback_fn, &ecount2); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, NULL, NULL, 65) == 0); - CHECK(ecount == 1); - CHECK(ecount2 == 10); - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, NULL, NULL); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, NULL, NULL, 65)); /* Try a bunch of prefabbed points with all possible encodings. */ for (i = 0; i < SECP256K1_EC_PARSE_TEST_NVALID; i++) { ec_pubkey_parse_pointtest(valid[i], 1, 1); @@ -6134,253 +6003,219 @@ static void run_eckey_edge_case_test(void) { 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41 }; - const unsigned char zeros[sizeof(rustsecp256k1_v0_9_2_pubkey)] = {0x00}; + const unsigned char zeros[sizeof(rustsecp256k1_v0_10_0_pubkey)] = {0x00}; unsigned char ctmp[33]; unsigned char ctmp2[33]; - rustsecp256k1_v0_9_2_pubkey pubkey; - rustsecp256k1_v0_9_2_pubkey pubkey2; - rustsecp256k1_v0_9_2_pubkey pubkey_one; - rustsecp256k1_v0_9_2_pubkey pubkey_negone; - const rustsecp256k1_v0_9_2_pubkey *pubkeys[3]; + rustsecp256k1_v0_10_0_pubkey pubkey; + rustsecp256k1_v0_10_0_pubkey pubkey2; + rustsecp256k1_v0_10_0_pubkey pubkey_one; + rustsecp256k1_v0_10_0_pubkey pubkey_negone; + const rustsecp256k1_v0_10_0_pubkey *pubkeys[3]; size_t len; - int32_t ecount; /* Group order is too large, reject. 
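The boundary being tested next is the documented validity range for secret keys: interpreted as a big-endian integer d, a key is valid iff 1 <= d <= n-1, where n is the group order spelled out in orderc above. A boundary sketch (same harness assumptions; string.h is already available to tests.c):

static void sketch_seckey_bounds(void) {
    unsigned char d[32] = {0};
    CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, d) == 0);  /* 0: too small */
    d[31] = 0x01;
    CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, d) == 1);  /* 1: smallest valid key */
    memset(d, 0xFF, 32);
    CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, d) == 0);  /* >= n: rejected */
}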
*/ - CHECK(rustsecp256k1_v0_9_2_ec_seckey_verify(CTX, orderc) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, orderc) == 0); SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey, orderc) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey, orderc) == 0); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_9_2_pubkey)) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_10_0_pubkey)) == 0); /* Maximum value is too large, reject. */ memset(ctmp, 255, 32); - CHECK(rustsecp256k1_v0_9_2_ec_seckey_verify(CTX, ctmp) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey, ctmp) == 0); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_9_2_pubkey)) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_10_0_pubkey)) == 0); /* Zero is too small, reject. */ memset(ctmp, 0, 32); - CHECK(rustsecp256k1_v0_9_2_ec_seckey_verify(CTX, ctmp) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey, ctmp) == 0); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_9_2_pubkey)) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_10_0_pubkey)) == 0); /* One must be accepted. */ ctmp[31] = 0x01; - CHECK(rustsecp256k1_v0_9_2_ec_seckey_verify(CTX, ctmp) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, ctmp) == 1); memset(&pubkey, 0, sizeof(pubkey)); SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey, ctmp) == 1); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_9_2_pubkey)) > 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_10_0_pubkey)) > 0); pubkey_one = pubkey; /* Group order + 1 is too large, reject. */ memcpy(ctmp, orderc, 32); ctmp[31] = 0x42; - CHECK(rustsecp256k1_v0_9_2_ec_seckey_verify(CTX, ctmp) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey, ctmp) == 0); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_9_2_pubkey)) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_10_0_pubkey)) == 0); /* -1 must be accepted. 
*/ ctmp[31] = 0x40; - CHECK(rustsecp256k1_v0_9_2_ec_seckey_verify(CTX, ctmp) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, ctmp) == 1); memset(&pubkey, 0, sizeof(pubkey)); SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey, ctmp) == 1); SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_9_2_pubkey)) > 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_10_0_pubkey)) > 0); pubkey_negone = pubkey; /* Tweak of zero leaves the value unchanged. */ memset(ctmp2, 0, 32); - CHECK(rustsecp256k1_v0_9_2_ec_seckey_tweak_add(CTX, ctmp, ctmp2) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_tweak_add(CTX, ctmp, ctmp2) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40); memcpy(&pubkey2, &pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_add(CTX, &pubkey, ctmp2) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_add(CTX, &pubkey, ctmp2) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Multiply tweak of zero zeroizes the output. */ - CHECK(rustsecp256k1_v0_9_2_ec_seckey_tweak_mul(CTX, ctmp, ctmp2) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros, ctmp, 32) == 0); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_mul(CTX, &pubkey, ctmp2) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_tweak_mul(CTX, ctmp, ctmp2) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_mul(CTX, &pubkey, ctmp2) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* If seckey_tweak_add or seckey_tweak_mul are called with an overflowing seckey, the seckey is zeroized. */ memcpy(ctmp, orderc, 32); memset(ctmp2, 0, 32); ctmp2[31] = 0x01; - CHECK(rustsecp256k1_v0_9_2_ec_seckey_verify(CTX, ctmp2) == 1); - CHECK(rustsecp256k1_v0_9_2_ec_seckey_verify(CTX, ctmp) == 0); - CHECK(rustsecp256k1_v0_9_2_ec_seckey_tweak_add(CTX, ctmp, ctmp2) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, ctmp2) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, ctmp) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_tweak_add(CTX, ctmp, ctmp2) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); - CHECK(rustsecp256k1_v0_9_2_ec_seckey_tweak_mul(CTX, ctmp, ctmp2) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_tweak_mul(CTX, ctmp, ctmp2) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros, ctmp, 32) == 0); /* If seckey_tweak_add or seckey_tweak_mul are called with an overflowing tweak, the seckey is zeroized. 
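The zeroize-on-failure behavior checked here is a deliberate hardening choice: a caller that ignores the return value is left holding an all-zero buffer rather than a stale secret. Restated compactly (a sketch assuming orderc, the big-endian group order defined at the top of this test, plus the usual harness names):

static void sketch_tweak_failure_zeroizes(void) {
    unsigned char seckey[32], zeros[32] = {0};
    memcpy(seckey, orderc, 32);
    seckey[31] = 0x40;  /* n - 1: a valid key */
    /* A tweak equal to n overflows, so the call fails... */
    CHECK(rustsecp256k1_v0_10_0_ec_seckey_tweak_add(CTX, seckey, orderc) == 0);
    /* ...and must not leave the old secret behind. */
    CHECK(rustsecp256k1_v0_10_0_memcmp_var(seckey, zeros, 32) == 0);
}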
*/ memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; - CHECK(rustsecp256k1_v0_9_2_ec_seckey_tweak_add(CTX, ctmp, orderc) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_tweak_add(CTX, ctmp, orderc) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; - CHECK(rustsecp256k1_v0_9_2_ec_seckey_tweak_mul(CTX, ctmp, orderc) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_tweak_mul(CTX, ctmp, orderc) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; /* If pubkey_tweak_add or pubkey_tweak_mul are called with an overflowing tweak, the pubkey is zeroized. */ - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_add(CTX, &pubkey, orderc) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_add(CTX, &pubkey, orderc) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_mul(CTX, &pubkey, orderc) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_mul(CTX, &pubkey, orderc) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); - /* If the resulting key in rustsecp256k1_v0_9_2_ec_seckey_tweak_add and - * rustsecp256k1_v0_9_2_ec_pubkey_tweak_add is 0 the functions fail and in the latter + /* If the resulting key in rustsecp256k1_v0_10_0_ec_seckey_tweak_add and + * rustsecp256k1_v0_10_0_ec_pubkey_tweak_add is 0 the functions fail and in the latter * case the pubkey is zeroized. */ memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; memset(ctmp2, 0, 32); ctmp2[31] = 1; - CHECK(rustsecp256k1_v0_9_2_ec_seckey_tweak_add(CTX, ctmp2, ctmp) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(zeros, ctmp2, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_tweak_add(CTX, ctmp2, ctmp) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(zeros, ctmp2, 32) == 0); ctmp2[31] = 1; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_add(CTX, &pubkey, ctmp2) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_add(CTX, &pubkey, ctmp2) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* Tweak computation wraps and results in a key of 1. */ ctmp2[31] = 2; - CHECK(rustsecp256k1_v0_9_2_ec_seckey_tweak_add(CTX, ctmp2, ctmp) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_tweak_add(CTX, ctmp2, ctmp) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1); ctmp2[31] = 2; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_add(CTX, &pubkey, ctmp2) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_add(CTX, &pubkey, ctmp2) == 1); ctmp2[31] = 1; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey2, ctmp2) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey2, ctmp2) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Tweak mul * 2 = 1+1. 
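Spelled out, the identity exercised by the "Tweak mul * 2 = 1+1" step is the group homomorphism G + 1*G == 2*G: adding a tweak of 1 to the public key for secret key 1 must land on the same point as multiplying that public key by 2. A self-contained restatement under the harness assumptions above (bytewise pubkey comparison mirrors how this test itself checks equality):

static void sketch_double_vs_add(void) {
    unsigned char one[32] = {0}, two[32] = {0};
    rustsecp256k1_v0_10_0_pubkey p_add, p_mul;
    one[31] = 1; two[31] = 2;
    /* p_add = 1*G, then + 1*G -> 2*G */
    CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &p_add, one) == 1);
    CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_add(CTX, &p_add, one) == 1);
    /* p_mul = 1*G, then * 2 -> 2*G */
    CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &p_mul, one) == 1);
    CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_mul(CTX, &p_mul, two) == 1);
    CHECK(rustsecp256k1_v0_10_0_memcmp_var(&p_add, &p_mul, sizeof(p_add)) == 0);
}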
*/ - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_add(CTX, &pubkey, ctmp2) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_add(CTX, &pubkey, ctmp2) == 1); ctmp2[31] = 2; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_mul(CTX, &pubkey2, ctmp2) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); - /* Test argument errors. */ - ecount = 0; - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, counting_illegal_callback_fn, &ecount); - CHECK(ecount == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_mul(CTX, &pubkey2, ctmp2) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Zeroize pubkey on parse error. */ memset(&pubkey, 0, 32); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_add(CTX, &pubkey, ctmp2) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_tweak_add(CTX, &pubkey, ctmp2)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); memset(&pubkey2, 0, 32); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_mul(CTX, &pubkey2, ctmp2) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey2, zeros, sizeof(pubkey2)) == 0); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_tweak_mul(CTX, &pubkey2, ctmp2)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey2, zeros, sizeof(pubkey2)) == 0); /* Plain argument errors. */ - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_ec_seckey_verify(CTX, ctmp) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_ec_seckey_verify(CTX, NULL) == 0); - CHECK(ecount == 1); - ecount = 0; + CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, ctmp) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, NULL)); memset(ctmp2, 0, 32); ctmp2[31] = 4; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_add(CTX, NULL, ctmp2) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_add(CTX, &pubkey, NULL) == 0); - CHECK(ecount == 2); - ecount = 0; + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_tweak_add(CTX, NULL, ctmp2)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_tweak_add(CTX, &pubkey, NULL)); memset(ctmp2, 0, 32); ctmp2[31] = 4; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_mul(CTX, NULL, ctmp2) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_tweak_mul(CTX, &pubkey, NULL) == 0); - CHECK(ecount == 2); - ecount = 0; + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_tweak_mul(CTX, NULL, ctmp2)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_tweak_mul(CTX, &pubkey, NULL)); memset(ctmp2, 0, 32); - CHECK(rustsecp256k1_v0_9_2_ec_seckey_tweak_add(CTX, NULL, ctmp2) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_ec_seckey_tweak_add(CTX, ctmp, NULL) == 0); - CHECK(ecount == 2); - ecount = 0; + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_seckey_tweak_add(CTX, NULL, ctmp2)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_seckey_tweak_add(CTX, ctmp, NULL)); memset(ctmp2, 0, 32); ctmp2[31] = 1; - CHECK(rustsecp256k1_v0_9_2_ec_seckey_tweak_mul(CTX, NULL, ctmp2) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_ec_seckey_tweak_mul(CTX, ctmp, NULL) == 0); - CHECK(ecount == 2); - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, NULL, ctmp) == 0); - CHECK(ecount == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_seckey_tweak_mul(CTX, NULL, ctmp2)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_seckey_tweak_mul(CTX, 
ctmp, NULL)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, NULL, ctmp)); memset(&pubkey, 1, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey, NULL) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_9_2_pubkey)) == 0); - /* rustsecp256k1_v0_9_2_ec_pubkey_combine tests. */ - ecount = 0; + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey, NULL)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_10_0_pubkey)) == 0); + /* rustsecp256k1_v0_10_0_ec_pubkey_combine tests. */ pubkeys[0] = &pubkey_one; - SECP256K1_CHECKMEM_UNDEFINE(&pubkeys[0], sizeof(rustsecp256k1_v0_9_2_pubkey *)); - SECP256K1_CHECKMEM_UNDEFINE(&pubkeys[1], sizeof(rustsecp256k1_v0_9_2_pubkey *)); - SECP256K1_CHECKMEM_UNDEFINE(&pubkeys[2], sizeof(rustsecp256k1_v0_9_2_pubkey *)); - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_9_2_pubkey)); - SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(rustsecp256k1_v0_9_2_pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_combine(CTX, &pubkey, pubkeys, 0) == 0); - SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(rustsecp256k1_v0_9_2_pubkey)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_9_2_pubkey)) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_combine(CTX, NULL, pubkeys, 1) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_9_2_pubkey)) == 0); - CHECK(ecount == 2); - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_9_2_pubkey)); - SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(rustsecp256k1_v0_9_2_pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_combine(CTX, &pubkey, NULL, 1) == 0); - SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(rustsecp256k1_v0_9_2_pubkey)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_9_2_pubkey)) == 0); - CHECK(ecount == 3); + SECP256K1_CHECKMEM_UNDEFINE(&pubkeys[0], sizeof(rustsecp256k1_v0_10_0_pubkey *)); + SECP256K1_CHECKMEM_UNDEFINE(&pubkeys[1], sizeof(rustsecp256k1_v0_10_0_pubkey *)); + SECP256K1_CHECKMEM_UNDEFINE(&pubkeys[2], sizeof(rustsecp256k1_v0_10_0_pubkey *)); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_10_0_pubkey)); + SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(rustsecp256k1_v0_10_0_pubkey)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_combine(CTX, &pubkey, pubkeys, 0)); + SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(rustsecp256k1_v0_10_0_pubkey)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_10_0_pubkey)) == 0); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_combine(CTX, NULL, pubkeys, 1)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_10_0_pubkey)) == 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_10_0_pubkey)); + SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(rustsecp256k1_v0_10_0_pubkey)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_combine(CTX, &pubkey, NULL, 1)); + SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(rustsecp256k1_v0_10_0_pubkey)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_10_0_pubkey)) == 0); pubkeys[0] = &pubkey_negone; - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_9_2_pubkey)); - SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(rustsecp256k1_v0_9_2_pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_combine(CTX, &pubkey, pubkeys, 1) == 1); - SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(rustsecp256k1_v0_9_2_pubkey)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, 
zeros, sizeof(rustsecp256k1_v0_9_2_pubkey)) > 0); - CHECK(ecount == 3); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_10_0_pubkey)); + SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(rustsecp256k1_v0_10_0_pubkey)); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_combine(CTX, &pubkey, pubkeys, 1) == 1); + SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(rustsecp256k1_v0_10_0_pubkey)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_10_0_pubkey)) > 0); len = 33; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(CTX, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(CTX, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(ctmp, ctmp2, 33) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(ctmp, ctmp2, 33) == 0); /* Result is infinity. */ pubkeys[0] = &pubkey_one; pubkeys[1] = &pubkey_negone; - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_9_2_pubkey)); - SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(rustsecp256k1_v0_9_2_pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_combine(CTX, &pubkey, pubkeys, 2) == 0); - SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(rustsecp256k1_v0_9_2_pubkey)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_9_2_pubkey)) == 0); - CHECK(ecount == 3); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_10_0_pubkey)); + SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(rustsecp256k1_v0_10_0_pubkey)); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_combine(CTX, &pubkey, pubkeys, 2) == 0); + SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(rustsecp256k1_v0_10_0_pubkey)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_10_0_pubkey)) == 0); /* Passes through infinity but comes out one. */ pubkeys[2] = &pubkey_one; - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_9_2_pubkey)); - SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(rustsecp256k1_v0_9_2_pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_combine(CTX, &pubkey, pubkeys, 3) == 1); - SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(rustsecp256k1_v0_9_2_pubkey)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_9_2_pubkey)) > 0); - CHECK(ecount == 3); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_10_0_pubkey)); + SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(rustsecp256k1_v0_10_0_pubkey)); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_combine(CTX, &pubkey, pubkeys, 3) == 1); + SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(rustsecp256k1_v0_10_0_pubkey)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_10_0_pubkey)) > 0); len = 33; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(CTX, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(CTX, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(ctmp, ctmp2, 33) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(ctmp, ctmp2, 33) == 0); /* Adds to two. 
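The combine cases above pin down its semantics: rustsecp256k1_v0_10_0_ec_pubkey_combine computes the EC sum of its inputs, fails (returning 0 with a zeroized output) when that sum is the point at infinity such as P + (-P), yet may pass through infinity between intermediate sums. A minimal sketch of the happy path (same harness assumptions; illustrative only):

static void sketch_combine_is_sum(void) {
    unsigned char one[32] = {0};
    rustsecp256k1_v0_10_0_pubkey g, sum;
    const rustsecp256k1_v0_10_0_pubkey *ins[2];
    one[31] = 1;
    CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &g, one) == 1);
    ins[0] = &g;
    ins[1] = &g;
    /* G + G == 2*G: combine is plain point addition over its inputs. */
    CHECK(rustsecp256k1_v0_10_0_ec_pubkey_combine(CTX, &sum, ins, 2) == 1);
}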
*/ pubkeys[1] = &pubkey_one; - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_9_2_pubkey)); - SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(rustsecp256k1_v0_9_2_pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_combine(CTX, &pubkey, pubkeys, 2) == 1); - SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(rustsecp256k1_v0_9_2_pubkey)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_9_2_pubkey)) > 0); - CHECK(ecount == 3); - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, NULL, NULL); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_10_0_pubkey)); + SECP256K1_CHECKMEM_UNDEFINE(&pubkey, sizeof(rustsecp256k1_v0_10_0_pubkey)); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_combine(CTX, &pubkey, pubkeys, 2) == 1); + SECP256K1_CHECKMEM_CHECK(&pubkey, sizeof(rustsecp256k1_v0_10_0_pubkey)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_10_0_pubkey)) > 0); } static void run_eckey_negate_test(void) { @@ -6391,22 +6226,22 @@ static void run_eckey_negate_test(void) { memcpy(seckey_tmp, seckey, 32); /* Verify negation changes the key and changes it back */ - CHECK(rustsecp256k1_v0_9_2_ec_seckey_negate(CTX, seckey) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(seckey, seckey_tmp, 32) != 0); - CHECK(rustsecp256k1_v0_9_2_ec_seckey_negate(CTX, seckey) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(seckey, seckey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_negate(CTX, seckey) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(seckey, seckey_tmp, 32) != 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_negate(CTX, seckey) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Check that privkey alias gives same result */ - CHECK(rustsecp256k1_v0_9_2_ec_seckey_negate(CTX, seckey) == 1); - CHECK(rustsecp256k1_v0_9_2_ec_privkey_negate(CTX, seckey_tmp) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(seckey, seckey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_negate(CTX, seckey) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_privkey_negate(CTX, seckey_tmp) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Negating all 0s fails */ memset(seckey, 0, 32); memset(seckey_tmp, 0, 32); - CHECK(rustsecp256k1_v0_9_2_ec_seckey_negate(CTX, seckey) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_negate(CTX, seckey) == 0); /* Check that seckey is not modified */ - CHECK(rustsecp256k1_v0_9_2_memcmp_var(seckey, seckey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Negating an overflowing seckey fails and the seckey is zeroed. 
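The negation checks above also establish that ec_seckey_negate is an involution on valid keys: applying it twice restores the original bytes, since -(-d) == d (mod n). Restated as a sketch (random_scalar_order_b32 is the helper this test uses to obtain a valid key; harness assumptions as above):

static void sketch_negate_involution(void) {
    unsigned char a[32], b[32];
    random_scalar_order_b32(a);  /* a valid key in [1, n-1] */
    memcpy(b, a, 32);
    CHECK(rustsecp256k1_v0_10_0_ec_seckey_negate(CTX, b) == 1);
    CHECK(rustsecp256k1_v0_10_0_ec_seckey_negate(CTX, b) == 1);
    CHECK(rustsecp256k1_v0_10_0_memcmp_var(a, b, 32) == 0);
}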
In this * test, the seckey has 16 random bytes to ensure that ec_seckey_negate @@ -6414,30 +6249,30 @@ static void run_eckey_negate_test(void) { random_scalar_order_b32(seckey); memset(seckey, 0xFF, 16); memset(seckey_tmp, 0, 32); - CHECK(rustsecp256k1_v0_9_2_ec_seckey_negate(CTX, seckey) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(seckey, seckey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_negate(CTX, seckey) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(seckey, seckey_tmp, 32) == 0); } -static void random_sign(rustsecp256k1_v0_9_2_scalar *sigr, rustsecp256k1_v0_9_2_scalar *sigs, const rustsecp256k1_v0_9_2_scalar *key, const rustsecp256k1_v0_9_2_scalar *msg, int *recid) { - rustsecp256k1_v0_9_2_scalar nonce; +static void random_sign(rustsecp256k1_v0_10_0_scalar *sigr, rustsecp256k1_v0_10_0_scalar *sigs, const rustsecp256k1_v0_10_0_scalar *key, const rustsecp256k1_v0_10_0_scalar *msg, int *recid) { + rustsecp256k1_v0_10_0_scalar nonce; do { random_scalar_order_test(&nonce); - } while(!rustsecp256k1_v0_9_2_ecdsa_sig_sign(&CTX->ecmult_gen_ctx, sigr, sigs, key, msg, &nonce, recid)); + } while(!rustsecp256k1_v0_10_0_ecdsa_sig_sign(&CTX->ecmult_gen_ctx, sigr, sigs, key, msg, &nonce, recid)); } static void test_ecdsa_sign_verify(void) { - rustsecp256k1_v0_9_2_gej pubj; - rustsecp256k1_v0_9_2_ge pub; - rustsecp256k1_v0_9_2_scalar one; - rustsecp256k1_v0_9_2_scalar msg, key; - rustsecp256k1_v0_9_2_scalar sigr, sigs; + rustsecp256k1_v0_10_0_gej pubj; + rustsecp256k1_v0_10_0_ge pub; + rustsecp256k1_v0_10_0_scalar one; + rustsecp256k1_v0_10_0_scalar msg, key; + rustsecp256k1_v0_10_0_scalar sigr, sigs; int getrec; int recid; random_scalar_order_test(&msg); random_scalar_order_test(&key); - rustsecp256k1_v0_9_2_ecmult_gen(&CTX->ecmult_gen_ctx, &pubj, &key); - rustsecp256k1_v0_9_2_ge_set_gej(&pub, &pubj); - getrec = rustsecp256k1_v0_9_2_testrand_bits(1); + rustsecp256k1_v0_10_0_ecmult_gen(&CTX->ecmult_gen_ctx, &pubj, &key); + rustsecp256k1_v0_10_0_ge_set_gej(&pub, &pubj); + getrec = rustsecp256k1_v0_10_0_testrand_bits(1); /* The specific way in which this conditional is written sidesteps a potential bug in clang. See the commit messages of the commit that introduced this comment for details. 
*/ if (getrec) { @@ -6446,10 +6281,10 @@ static void test_ecdsa_sign_verify(void) { } else { random_sign(&sigr, &sigs, &key, &msg, NULL); } - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sigr, &sigs, &pub, &msg)); - rustsecp256k1_v0_9_2_scalar_set_int(&one, 1); - rustsecp256k1_v0_9_2_scalar_add(&msg, &msg, &one); - CHECK(!rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sigr, &sigs, &pub, &msg)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sigr, &sigs, &pub, &msg)); + rustsecp256k1_v0_10_0_scalar_set_int(&one, 1); + rustsecp256k1_v0_10_0_scalar_add(&msg, &msg, &one); + CHECK(!rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sigr, &sigs, &pub, &msg)); } static void run_ecdsa_sign_verify(void) { @@ -6506,9 +6341,9 @@ static int nonce_function_test_retry(unsigned char *nonce32, const unsigned char return nonce_function_rfc6979(nonce32, msg32, key32, algo16, data, counter - 5); } -static int is_empty_signature(const rustsecp256k1_v0_9_2_ecdsa_signature *sig) { - static const unsigned char res[sizeof(rustsecp256k1_v0_9_2_ecdsa_signature)] = {0}; - return rustsecp256k1_v0_9_2_memcmp_var(sig, res, sizeof(rustsecp256k1_v0_9_2_ecdsa_signature)) == 0; +static int is_empty_signature(const rustsecp256k1_v0_10_0_ecdsa_signature *sig) { + static const unsigned char res[sizeof(rustsecp256k1_v0_10_0_ecdsa_signature)] = {0}; + return rustsecp256k1_v0_10_0_memcmp_var(sig, res, sizeof(rustsecp256k1_v0_10_0_ecdsa_signature)) == 0; } static void test_ecdsa_end_to_end(void) { @@ -6516,191 +6351,191 @@ static void test_ecdsa_end_to_end(void) { unsigned char privkey[32]; unsigned char message[32]; unsigned char privkey2[32]; - rustsecp256k1_v0_9_2_ecdsa_signature signature[6]; - rustsecp256k1_v0_9_2_scalar r, s; + rustsecp256k1_v0_10_0_ecdsa_signature signature[6]; + rustsecp256k1_v0_10_0_scalar r, s; unsigned char sig[74]; size_t siglen = 74; unsigned char pubkeyc[65]; size_t pubkeyclen = 65; - rustsecp256k1_v0_9_2_pubkey pubkey; - rustsecp256k1_v0_9_2_pubkey pubkey_tmp; + rustsecp256k1_v0_10_0_pubkey pubkey; + rustsecp256k1_v0_10_0_pubkey pubkey_tmp; unsigned char seckey[300]; size_t seckeylen = 300; /* Generate a random key and message. */ { - rustsecp256k1_v0_9_2_scalar msg, key; + rustsecp256k1_v0_10_0_scalar msg, key; random_scalar_order_test(&msg); random_scalar_order_test(&key); - rustsecp256k1_v0_9_2_scalar_get_b32(privkey, &key); - rustsecp256k1_v0_9_2_scalar_get_b32(message, &msg); + rustsecp256k1_v0_10_0_scalar_get_b32(privkey, &key); + rustsecp256k1_v0_10_0_scalar_get_b32(message, &msg); } /* Construct and verify corresponding public key. */ - CHECK(rustsecp256k1_v0_9_2_ec_seckey_verify(CTX, privkey) == 1); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey, privkey) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_seckey_verify(CTX, privkey) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey, privkey) == 1); /* Verify exporting and importing public key. */ - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_serialize(CTX, pubkeyc, &pubkeyclen, &pubkey, rustsecp256k1_v0_9_2_testrand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED)); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_serialize(CTX, pubkeyc, &pubkeyclen, &pubkey, rustsecp256k1_v0_10_0_testrand_bits(1) == 1 ? 
SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED)); memset(&pubkey, 0, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pubkey, pubkeyc, pubkeyclen) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pubkey, pubkeyc, pubkeyclen) == 1); /* Verify negation changes the key and changes it back */ memcpy(&pubkey_tmp, &pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_negate(CTX, &pubkey_tmp) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_negate(CTX, &pubkey_tmp) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_negate(CTX, &pubkey_tmp) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_negate(CTX, &pubkey_tmp) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0); /* Verify private key import and export. */ - CHECK(ec_privkey_export_der(CTX, seckey, &seckeylen, privkey, rustsecp256k1_v0_9_2_testrand_bits(1) == 1)); + CHECK(ec_privkey_export_der(CTX, seckey, &seckeylen, privkey, rustsecp256k1_v0_10_0_testrand_bits(1) == 1)); CHECK(ec_privkey_import_der(CTX, privkey2, seckey, seckeylen) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(privkey, privkey2, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(privkey, privkey2, 32) == 0); /* Optionally tweak the keys using addition. */ - if (rustsecp256k1_v0_9_2_testrand_int(3) == 0) { + if (rustsecp256k1_v0_10_0_testrand_int(3) == 0) { int ret1; int ret2; int ret3; unsigned char rnd[32]; unsigned char privkey_tmp[32]; - rustsecp256k1_v0_9_2_pubkey pubkey2; - rustsecp256k1_v0_9_2_testrand256_test(rnd); + rustsecp256k1_v0_10_0_pubkey pubkey2; + rustsecp256k1_v0_10_0_testrand256_test(rnd); memcpy(privkey_tmp, privkey, 32); - ret1 = rustsecp256k1_v0_9_2_ec_seckey_tweak_add(CTX, privkey, rnd); - ret2 = rustsecp256k1_v0_9_2_ec_pubkey_tweak_add(CTX, &pubkey, rnd); + ret1 = rustsecp256k1_v0_10_0_ec_seckey_tweak_add(CTX, privkey, rnd); + ret2 = rustsecp256k1_v0_10_0_ec_pubkey_tweak_add(CTX, &pubkey, rnd); /* Check that privkey alias gives same result */ - ret3 = rustsecp256k1_v0_9_2_ec_privkey_tweak_add(CTX, privkey_tmp, rnd); + ret3 = rustsecp256k1_v0_10_0_ec_privkey_tweak_add(CTX, privkey_tmp, rnd); CHECK(ret1 == ret2); CHECK(ret2 == ret3); if (ret1 == 0) { return; } - CHECK(rustsecp256k1_v0_9_2_memcmp_var(privkey, privkey_tmp, 32) == 0); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey2, privkey) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(privkey, privkey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey2, privkey) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); } /* Optionally tweak the keys using multiplication. 
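Both tweak blocks in this end-to-end test check the same commutation property: tweaking the secret key and then deriving the public key must agree with tweaking the derived public key directly, i.e. (d*t)*G == t*(d*G). Isolated into a sketch (harness assumptions as above; illustrative only):

static void sketch_tweak_mul_commutes(void) {
    unsigned char d[32], t[32];
    rustsecp256k1_v0_10_0_pubkey via_pubkey, via_seckey;
    random_scalar_order_b32(d);
    random_scalar_order_b32(t);
    CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &via_pubkey, d) == 1);
    /* Tweak the secret, and independently tweak the public key... */
    CHECK(rustsecp256k1_v0_10_0_ec_seckey_tweak_mul(CTX, d, t) == 1);
    CHECK(rustsecp256k1_v0_10_0_ec_pubkey_tweak_mul(CTX, &via_pubkey, t) == 1);
    /* ...then re-derive and compare: both routes must land on (d*t)*G. */
    CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &via_seckey, d) == 1);
    CHECK(rustsecp256k1_v0_10_0_memcmp_var(&via_seckey, &via_pubkey, sizeof(via_seckey)) == 0);
}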
*/ - if (rustsecp256k1_v0_9_2_testrand_int(3) == 0) { + if (rustsecp256k1_v0_10_0_testrand_int(3) == 0) { int ret1; int ret2; int ret3; unsigned char rnd[32]; unsigned char privkey_tmp[32]; - rustsecp256k1_v0_9_2_pubkey pubkey2; - rustsecp256k1_v0_9_2_testrand256_test(rnd); + rustsecp256k1_v0_10_0_pubkey pubkey2; + rustsecp256k1_v0_10_0_testrand256_test(rnd); memcpy(privkey_tmp, privkey, 32); - ret1 = rustsecp256k1_v0_9_2_ec_seckey_tweak_mul(CTX, privkey, rnd); - ret2 = rustsecp256k1_v0_9_2_ec_pubkey_tweak_mul(CTX, &pubkey, rnd); + ret1 = rustsecp256k1_v0_10_0_ec_seckey_tweak_mul(CTX, privkey, rnd); + ret2 = rustsecp256k1_v0_10_0_ec_pubkey_tweak_mul(CTX, &pubkey, rnd); /* Check that privkey alias gives same result */ - ret3 = rustsecp256k1_v0_9_2_ec_privkey_tweak_mul(CTX, privkey_tmp, rnd); + ret3 = rustsecp256k1_v0_10_0_ec_privkey_tweak_mul(CTX, privkey_tmp, rnd); CHECK(ret1 == ret2); CHECK(ret2 == ret3); if (ret1 == 0) { return; } - CHECK(rustsecp256k1_v0_9_2_memcmp_var(privkey, privkey_tmp, 32) == 0); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey2, privkey) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(privkey, privkey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey2, privkey) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); } /* Sign. */ - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &signature[0], message, privkey, NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &signature[4], message, privkey, NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &signature[1], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &signature[0], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &signature[4], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &signature[1], message, privkey, NULL, extra) == 1); extra[31] = 1; - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &signature[2], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &signature[2], message, privkey, NULL, extra) == 1); extra[31] = 0; extra[0] = 1; - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &signature[3], message, privkey, NULL, extra) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&signature[0], &signature[4], sizeof(signature[0])) == 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&signature[0], &signature[1], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&signature[0], &signature[2], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&signature[0], &signature[3], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&signature[1], &signature[2], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&signature[1], &signature[3], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&signature[2], &signature[3], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &signature[3], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&signature[0], &signature[4], sizeof(signature[0])) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&signature[0], &signature[1], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&signature[0], &signature[2], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&signature[0], &signature[3], sizeof(signature[0])) != 0); + 
CHECK(rustsecp256k1_v0_10_0_memcmp_var(&signature[1], &signature[2], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&signature[1], &signature[3], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&signature[2], &signature[3], sizeof(signature[0])) != 0); /* Verify. */ - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &signature[0], message, &pubkey) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &signature[1], message, &pubkey) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &signature[2], message, &pubkey) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &signature[3], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &signature[0], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &signature[1], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &signature[2], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &signature[3], message, &pubkey) == 1); /* Test lower-S form, malleate, verify and fail, test again, malleate again */ - CHECK(!rustsecp256k1_v0_9_2_ecdsa_signature_normalize(CTX, NULL, &signature[0])); - rustsecp256k1_v0_9_2_ecdsa_signature_load(CTX, &r, &s, &signature[0]); - rustsecp256k1_v0_9_2_scalar_negate(&s, &s); - rustsecp256k1_v0_9_2_ecdsa_signature_save(&signature[5], &r, &s); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &signature[5], message, &pubkey) == 0); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_normalize(CTX, NULL, &signature[5])); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_normalize(CTX, &signature[5], &signature[5])); - CHECK(!rustsecp256k1_v0_9_2_ecdsa_signature_normalize(CTX, NULL, &signature[5])); - CHECK(!rustsecp256k1_v0_9_2_ecdsa_signature_normalize(CTX, &signature[5], &signature[5])); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &signature[5], message, &pubkey) == 1); - rustsecp256k1_v0_9_2_scalar_negate(&s, &s); - rustsecp256k1_v0_9_2_ecdsa_signature_save(&signature[5], &r, &s); - CHECK(!rustsecp256k1_v0_9_2_ecdsa_signature_normalize(CTX, NULL, &signature[5])); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &signature[5], message, &pubkey) == 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&signature[5], &signature[0], 64) == 0); + CHECK(!rustsecp256k1_v0_10_0_ecdsa_signature_normalize(CTX, NULL, &signature[0])); + rustsecp256k1_v0_10_0_ecdsa_signature_load(CTX, &r, &s, &signature[0]); + rustsecp256k1_v0_10_0_scalar_negate(&s, &s); + rustsecp256k1_v0_10_0_ecdsa_signature_save(&signature[5], &r, &s); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &signature[5], message, &pubkey) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_normalize(CTX, NULL, &signature[5])); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_normalize(CTX, &signature[5], &signature[5])); + CHECK(!rustsecp256k1_v0_10_0_ecdsa_signature_normalize(CTX, NULL, &signature[5])); + CHECK(!rustsecp256k1_v0_10_0_ecdsa_signature_normalize(CTX, &signature[5], &signature[5])); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &signature[5], message, &pubkey) == 1); + rustsecp256k1_v0_10_0_scalar_negate(&s, &s); + rustsecp256k1_v0_10_0_ecdsa_signature_save(&signature[5], &r, &s); + CHECK(!rustsecp256k1_v0_10_0_ecdsa_signature_normalize(CTX, NULL, &signature[5])); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &signature[5], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&signature[5], &signature[0], 64) == 0); /* Serialize/parse DER and verify again */ - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der(CTX, sig, &siglen, 
&signature[0]) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der(CTX, sig, &siglen, &signature[0]) == 1); memset(&signature[0], 0, sizeof(signature[0])); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &signature[0], sig, siglen) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &signature[0], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &signature[0], sig, siglen) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &signature[0], message, &pubkey) == 1); /* Serialize/destroy/parse DER and verify again. */ siglen = 74; - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der(CTX, sig, &siglen, &signature[0]) == 1); - sig[rustsecp256k1_v0_9_2_testrand_int(siglen)] += 1 + rustsecp256k1_v0_9_2_testrand_int(255); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &signature[0], sig, siglen) == 0 || - rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &signature[0], message, &pubkey) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der(CTX, sig, &siglen, &signature[0]) == 1); + sig[rustsecp256k1_v0_10_0_testrand_int(siglen)] += 1 + rustsecp256k1_v0_10_0_testrand_int(255); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &signature[0], sig, siglen) == 0 || + rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &signature[0], message, &pubkey) == 0); } static void test_random_pubkeys(void) { - rustsecp256k1_v0_9_2_ge elem; - rustsecp256k1_v0_9_2_ge elem2; + rustsecp256k1_v0_10_0_ge elem; + rustsecp256k1_v0_10_0_ge elem2; unsigned char in[65]; /* Generate some randomly sized pubkeys. */ - size_t len = rustsecp256k1_v0_9_2_testrand_bits(2) == 0 ? 65 : 33; - if (rustsecp256k1_v0_9_2_testrand_bits(2) == 0) { - len = rustsecp256k1_v0_9_2_testrand_bits(6); + size_t len = rustsecp256k1_v0_10_0_testrand_bits(2) == 0 ? 65 : 33; + if (rustsecp256k1_v0_10_0_testrand_bits(2) == 0) { + len = rustsecp256k1_v0_10_0_testrand_bits(6); } if (len == 65) { - in[0] = rustsecp256k1_v0_9_2_testrand_bits(1) ? 4 : (rustsecp256k1_v0_9_2_testrand_bits(1) ? 6 : 7); + in[0] = rustsecp256k1_v0_10_0_testrand_bits(1) ? 4 : (rustsecp256k1_v0_10_0_testrand_bits(1) ? 6 : 7); } else { - in[0] = rustsecp256k1_v0_9_2_testrand_bits(1) ? 2 : 3; + in[0] = rustsecp256k1_v0_10_0_testrand_bits(1) ? 2 : 3; } - if (rustsecp256k1_v0_9_2_testrand_bits(3) == 0) { - in[0] = rustsecp256k1_v0_9_2_testrand_bits(8); + if (rustsecp256k1_v0_10_0_testrand_bits(3) == 0) { + in[0] = rustsecp256k1_v0_10_0_testrand_bits(8); } if (len > 1) { - rustsecp256k1_v0_9_2_testrand256(&in[1]); + rustsecp256k1_v0_10_0_testrand256(&in[1]); } if (len > 33) { - rustsecp256k1_v0_9_2_testrand256(&in[33]); + rustsecp256k1_v0_10_0_testrand256(&in[33]); } - if (rustsecp256k1_v0_9_2_eckey_pubkey_parse(&elem, in, len)) { + if (rustsecp256k1_v0_10_0_eckey_pubkey_parse(&elem, in, len)) { unsigned char out[65]; unsigned char firstb; int res; size_t size = len; firstb = in[0]; /* If the pubkey can be parsed, it should round-trip... */ - CHECK(rustsecp256k1_v0_9_2_eckey_pubkey_serialize(&elem, out, &size, len == 33)); + CHECK(rustsecp256k1_v0_10_0_eckey_pubkey_serialize(&elem, out, &size, len == 33)); CHECK(size == len); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&in[1], &out[1], len-1) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&in[1], &out[1], len-1) == 0); /* ... except for the type of hybrid inputs. 
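Hybrid encodings are the one case where the type byte is not preserved on round-trip: X9.62 types 6 and 7 carry both coordinates like type 4, but the type byte additionally commits to the parity of y exactly as 2 and 3 do for compressed keys, which is what the firstb + 4 check below verifies. A hypothetical predicate (not a library function) capturing that rule:

/* enc65: 65-byte hybrid encoding; enc65[33..64] is big-endian y. */
static int sketch_hybrid_type_consistent(const unsigned char *enc65) {
    unsigned char y_parity = enc65[64] & 1;           /* low byte of y */
    return enc65[0] == (unsigned char)(6 + y_parity); /* 6: even y, 7: odd y */
}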
*/ if ((in[0] != 6) && (in[0] != 7)) { CHECK(in[0] == out[0]); } size = 65; - CHECK(rustsecp256k1_v0_9_2_eckey_pubkey_serialize(&elem, in, &size, 0)); + CHECK(rustsecp256k1_v0_10_0_eckey_pubkey_serialize(&elem, in, &size, 0)); CHECK(size == 65); - CHECK(rustsecp256k1_v0_9_2_eckey_pubkey_parse(&elem2, in, size)); - ge_equals_ge(&elem,&elem2); + CHECK(rustsecp256k1_v0_10_0_eckey_pubkey_parse(&elem2, in, size)); + CHECK(rustsecp256k1_v0_10_0_ge_eq_var(&elem2, &elem)); /* Check that the X9.62 hybrid type is checked. */ - in[0] = rustsecp256k1_v0_9_2_testrand_bits(1) ? 6 : 7; - res = rustsecp256k1_v0_9_2_eckey_pubkey_parse(&elem2, in, size); + in[0] = rustsecp256k1_v0_10_0_testrand_bits(1) ? 6 : 7; + res = rustsecp256k1_v0_10_0_eckey_pubkey_parse(&elem2, in, size); if (firstb == 2 || firstb == 3) { if (in[0] == firstb + 4) { CHECK(res); @@ -6709,9 +6544,9 @@ static void test_random_pubkeys(void) { } } if (res) { - ge_equals_ge(&elem,&elem2); - CHECK(rustsecp256k1_v0_9_2_eckey_pubkey_serialize(&elem, out, &size, 0)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&in[1], &out[1], 64) == 0); + CHECK(rustsecp256k1_v0_10_0_ge_eq_var(&elem, &elem2)); + CHECK(rustsecp256k1_v0_10_0_eckey_pubkey_serialize(&elem, out, &size, 0)); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&in[1], &out[1], 64) == 0); } } } @@ -6727,42 +6562,38 @@ static void run_pubkey_comparison(void) { 0xde, 0x36, 0x0e, 0x87, 0x59, 0x8f, 0x3c, 0x01, 0x36, 0x2a, 0x2a, 0xb8, 0xc6, 0xf4, 0x5e, 0x4d, 0xb2, 0xc2, 0xd5, 0x03, 0xa7, 0xf9, 0xf1, 0x4f, 0xa8, 0xfa, 0x95, 0xa8, 0xe9, 0x69, 0x76, 0x1c }; - rustsecp256k1_v0_9_2_pubkey pk1; - rustsecp256k1_v0_9_2_pubkey pk2; - int32_t ecount = 0; - - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pk1, pk1_ser, sizeof(pk1_ser)) == 1); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pk2, pk2_ser, sizeof(pk2_ser)) == 1); - - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, counting_illegal_callback_fn, &ecount); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_cmp(CTX, NULL, &pk2) < 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_cmp(CTX, &pk1, NULL) > 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_cmp(CTX, &pk1, &pk2) < 0); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_cmp(CTX, &pk2, &pk1) > 0); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_cmp(CTX, &pk1, &pk1) == 0); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_cmp(CTX, &pk2, &pk2) == 0); - CHECK(ecount == 2); + rustsecp256k1_v0_10_0_pubkey pk1; + rustsecp256k1_v0_10_0_pubkey pk2; + + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pk1, pk1_ser, sizeof(pk1_ser)) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pk2, pk2_ser, sizeof(pk2_ser)) == 1); + + CHECK_ILLEGAL_VOID(CTX, CHECK(rustsecp256k1_v0_10_0_ec_pubkey_cmp(CTX, NULL, &pk2) < 0)); + CHECK_ILLEGAL_VOID(CTX, CHECK(rustsecp256k1_v0_10_0_ec_pubkey_cmp(CTX, &pk1, NULL) > 0)); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_cmp(CTX, &pk1, &pk2) < 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_cmp(CTX, &pk2, &pk1) > 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_cmp(CTX, &pk1, &pk1) == 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_cmp(CTX, &pk2, &pk2) == 0); { - rustsecp256k1_v0_9_2_pubkey pk_tmp; + rustsecp256k1_v0_10_0_pubkey pk_tmp; memset(&pk_tmp, 0, sizeof(pk_tmp)); /* illegal pubkey */ - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_cmp(CTX, &pk_tmp, &pk2) < 0); - CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_cmp(CTX, &pk_tmp, &pk_tmp) == 0); - CHECK(ecount == 5); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_cmp(CTX, &pk2, &pk_tmp) > 0); - CHECK(ecount == 6); + CHECK_ILLEGAL_VOID(CTX, 
CHECK(rustsecp256k1_v0_10_0_ec_pubkey_cmp(CTX, &pk_tmp, &pk2) < 0)); + { + int32_t ecount = 0; + rustsecp256k1_v0_10_0_context_set_illegal_callback(CTX, counting_callback_fn, &ecount); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_cmp(CTX, &pk_tmp, &pk_tmp) == 0); + CHECK(ecount == 2); + rustsecp256k1_v0_10_0_context_set_illegal_callback(CTX, NULL, NULL); + } + CHECK_ILLEGAL_VOID(CTX, CHECK(rustsecp256k1_v0_10_0_ec_pubkey_cmp(CTX, &pk2, &pk_tmp) > 0)); } - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, NULL, NULL); - /* Make pk2 the same as pk1 but with 3 rather than 2. Note that in * an uncompressed encoding, these would have the opposite ordering */ pk1_ser[0] = 3; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pk2, pk1_ser, sizeof(pk1_ser)) == 1); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_cmp(CTX, &pk1, &pk2) < 0); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_cmp(CTX, &pk2, &pk1) > 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pk2, pk1_ser, sizeof(pk1_ser)) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_cmp(CTX, &pk1, &pk2) < 0); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_cmp(CTX, &pk2, &pk1) > 0); } static void run_random_pubkeys(void) { @@ -6784,36 +6615,36 @@ static int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int cer int ret = 0; - rustsecp256k1_v0_9_2_ecdsa_signature sig_der; + rustsecp256k1_v0_10_0_ecdsa_signature sig_der; unsigned char roundtrip_der[2048]; unsigned char compact_der[64]; size_t len_der = 2048; int parsed_der = 0, valid_der = 0, roundtrips_der = 0; - rustsecp256k1_v0_9_2_ecdsa_signature sig_der_lax; + rustsecp256k1_v0_10_0_ecdsa_signature sig_der_lax; unsigned char roundtrip_der_lax[2048]; unsigned char compact_der_lax[64]; size_t len_der_lax = 2048; int parsed_der_lax = 0, valid_der_lax = 0, roundtrips_der_lax = 0; - parsed_der = rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig_der, sig, siglen); + parsed_der = rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig_der, sig, siglen); if (parsed_der) { - ret |= (!rustsecp256k1_v0_9_2_ecdsa_signature_serialize_compact(CTX, compact_der, &sig_der)) << 0; - valid_der = (rustsecp256k1_v0_9_2_memcmp_var(compact_der, zeroes, 32) != 0) && (rustsecp256k1_v0_9_2_memcmp_var(compact_der + 32, zeroes, 32) != 0); + ret |= (!rustsecp256k1_v0_10_0_ecdsa_signature_serialize_compact(CTX, compact_der, &sig_der)) << 0; + valid_der = (rustsecp256k1_v0_10_0_memcmp_var(compact_der, zeroes, 32) != 0) && (rustsecp256k1_v0_10_0_memcmp_var(compact_der + 32, zeroes, 32) != 0); } if (valid_der) { - ret |= (!rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der(CTX, roundtrip_der, &len_der, &sig_der)) << 1; - roundtrips_der = (len_der == siglen) && rustsecp256k1_v0_9_2_memcmp_var(roundtrip_der, sig, siglen) == 0; + ret |= (!rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der(CTX, roundtrip_der, &len_der, &sig_der)) << 1; + roundtrips_der = (len_der == siglen) && rustsecp256k1_v0_10_0_memcmp_var(roundtrip_der, sig, siglen) == 0; } - parsed_der_lax = rustsecp256k1_v0_9_2_ecdsa_signature_parse_der_lax(CTX, &sig_der_lax, sig, siglen); + parsed_der_lax = rustsecp256k1_v0_10_0_ecdsa_signature_parse_der_lax(CTX, &sig_der_lax, sig, siglen); if (parsed_der_lax) { - ret |= (!rustsecp256k1_v0_9_2_ecdsa_signature_serialize_compact(CTX, compact_der_lax, &sig_der_lax)) << 10; - valid_der_lax = (rustsecp256k1_v0_9_2_memcmp_var(compact_der_lax, zeroes, 32) != 0) && (rustsecp256k1_v0_9_2_memcmp_var(compact_der_lax + 32, zeroes, 32) != 0); + ret |= (!rustsecp256k1_v0_10_0_ecdsa_signature_serialize_compact(CTX, compact_der_lax, 
&sig_der_lax)) << 10; + valid_der_lax = (rustsecp256k1_v0_10_0_memcmp_var(compact_der_lax, zeroes, 32) != 0) && (rustsecp256k1_v0_10_0_memcmp_var(compact_der_lax + 32, zeroes, 32) != 0); } if (valid_der_lax) { - ret |= (!rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der(CTX, roundtrip_der_lax, &len_der_lax, &sig_der_lax)) << 11; - roundtrips_der_lax = (len_der_lax == siglen) && rustsecp256k1_v0_9_2_memcmp_var(roundtrip_der_lax, sig, siglen) == 0; + ret |= (!rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der(CTX, roundtrip_der_lax, &len_der_lax, &sig_der_lax)) << 11; + roundtrips_der_lax = (len_der_lax == siglen) && rustsecp256k1_v0_10_0_memcmp_var(roundtrip_der_lax, sig, siglen) == 0; } if (certainly_der) { @@ -6829,7 +6660,7 @@ static int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int cer if (valid_der) { ret |= (!roundtrips_der_lax) << 12; ret |= (len_der != len_der_lax) << 13; - ret |= ((len_der != len_der_lax) || (rustsecp256k1_v0_9_2_memcmp_var(roundtrip_der_lax, roundtrip_der, len_der) != 0)) << 14; + ret |= ((len_der != len_der_lax) || (rustsecp256k1_v0_10_0_memcmp_var(roundtrip_der_lax, roundtrip_der, len_der) != 0)) << 14; } ret |= (roundtrips_der != roundtrips_der_lax) << 15; if (parsed_der) { @@ -6853,27 +6684,27 @@ static void assign_big_endian(unsigned char *ptr, size_t ptrlen, uint32_t val) { static void damage_array(unsigned char *sig, size_t *len) { int pos; - int action = rustsecp256k1_v0_9_2_testrand_bits(3); + int action = rustsecp256k1_v0_10_0_testrand_bits(3); if (action < 1 && *len > 3) { /* Delete a byte. */ - pos = rustsecp256k1_v0_9_2_testrand_int(*len); + pos = rustsecp256k1_v0_10_0_testrand_int(*len); memmove(sig + pos, sig + pos + 1, *len - pos - 1); (*len)--; return; } else if (action < 2 && *len < 2048) { /* Insert a byte. */ - pos = rustsecp256k1_v0_9_2_testrand_int(1 + *len); + pos = rustsecp256k1_v0_10_0_testrand_int(1 + *len); memmove(sig + pos + 1, sig + pos, *len - pos); - sig[pos] = rustsecp256k1_v0_9_2_testrand_bits(8); + sig[pos] = rustsecp256k1_v0_10_0_testrand_bits(8); (*len)++; return; } else if (action < 4) { /* Modify a byte. */ - sig[rustsecp256k1_v0_9_2_testrand_int(*len)] += 1 + rustsecp256k1_v0_9_2_testrand_int(255); + sig[rustsecp256k1_v0_10_0_testrand_int(*len)] += 1 + rustsecp256k1_v0_10_0_testrand_int(255); return; } else { /* action < 8 */ /* Modify a bit. */ - sig[rustsecp256k1_v0_9_2_testrand_int(*len)] ^= 1 << rustsecp256k1_v0_9_2_testrand_bits(3); + sig[rustsecp256k1_v0_10_0_testrand_int(*len)] ^= 1 << rustsecp256k1_v0_10_0_testrand_bits(3); return; } } @@ -6886,23 +6717,23 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly int n; *len = 0; - der = rustsecp256k1_v0_9_2_testrand_bits(2) == 0; + der = rustsecp256k1_v0_10_0_testrand_bits(2) == 0; *certainly_der = der; *certainly_not_der = 0; - indet = der ? 0 : rustsecp256k1_v0_9_2_testrand_int(10) == 0; + indet = der ? 0 : rustsecp256k1_v0_10_0_testrand_int(10) == 0; for (n = 0; n < 2; n++) { /* We generate two classes of numbers: nlow==1 "low" ones (up to 32 bytes), nlow==0 "high" ones (32 bytes with 129 top bits set, or larger than 32 bytes) */ - nlow[n] = der ? 1 : (rustsecp256k1_v0_9_2_testrand_bits(3) != 0); + nlow[n] = der ? 1 : (rustsecp256k1_v0_10_0_testrand_bits(3) != 0); /* The length of the number in bytes (the first byte of which will always be nonzero) */ - nlen[n] = nlow[n] ? 
rustsecp256k1_v0_9_2_testrand_int(33) : 32 + rustsecp256k1_v0_9_2_testrand_int(200) * rustsecp256k1_v0_9_2_testrand_bits(3) / 8; + nlen[n] = nlow[n] ? rustsecp256k1_v0_10_0_testrand_int(33) : 32 + rustsecp256k1_v0_10_0_testrand_int(200) * rustsecp256k1_v0_10_0_testrand_bits(3) / 8; CHECK(nlen[n] <= 232); /* The top bit of the number. */ - nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : rustsecp256k1_v0_9_2_testrand_bits(1)); + nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : rustsecp256k1_v0_10_0_testrand_bits(1)); /* The top byte of the number (after the potential hardcoded 16 0xFF characters for "high" 32 bytes numbers) */ - nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + rustsecp256k1_v0_9_2_testrand_bits(7) : 1 + rustsecp256k1_v0_9_2_testrand_int(127)); + nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + rustsecp256k1_v0_10_0_testrand_bits(7) : 1 + rustsecp256k1_v0_10_0_testrand_int(127)); /* The number of zero bytes in front of the number (which is 0 or 1 in case of DER, otherwise we extend up to 300 bytes) */ - nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? rustsecp256k1_v0_9_2_testrand_int(3) : rustsecp256k1_v0_9_2_testrand_int(300 - nlen[n]) * rustsecp256k1_v0_9_2_testrand_bits(3) / 8); + nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? rustsecp256k1_v0_10_0_testrand_int(3) : rustsecp256k1_v0_10_0_testrand_int(300 - nlen[n]) * rustsecp256k1_v0_10_0_testrand_bits(3) / 8); if (nzlen[n] > ((nlen[n] == 0 || nhbit[n]) ? 1 : 0)) { *certainly_not_der = 1; } @@ -6911,7 +6742,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly nlenlen[n] = nlen[n] + nzlen[n] < 128 ? 0 : (nlen[n] + nzlen[n] < 256 ? 1 : 2); if (!der) { /* nlenlen[n] max 127 bytes */ - int add = rustsecp256k1_v0_9_2_testrand_int(127 - nlenlen[n]) * rustsecp256k1_v0_9_2_testrand_bits(4) * rustsecp256k1_v0_9_2_testrand_bits(4) / 256; + int add = rustsecp256k1_v0_10_0_testrand_int(127 - nlenlen[n]) * rustsecp256k1_v0_10_0_testrand_bits(4) * rustsecp256k1_v0_10_0_testrand_bits(4) / 256; nlenlen[n] += add; if (add != 0) { *certainly_not_der = 1; @@ -6925,7 +6756,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen <= 856); /* The length of the garbage inside the tuple. */ - elen = (der || indet) ? 0 : rustsecp256k1_v0_9_2_testrand_int(980 - tlen) * rustsecp256k1_v0_9_2_testrand_bits(3) / 8; + elen = (der || indet) ? 0 : rustsecp256k1_v0_10_0_testrand_int(980 - tlen) * rustsecp256k1_v0_10_0_testrand_bits(3) / 8; if (elen != 0) { *certainly_not_der = 1; } @@ -6933,7 +6764,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen <= 980); /* The length of the garbage after the end of the tuple. */ - glen = der ? 0 : rustsecp256k1_v0_9_2_testrand_int(990 - tlen) * rustsecp256k1_v0_9_2_testrand_bits(3) / 8; + glen = der ? 0 : rustsecp256k1_v0_10_0_testrand_int(990 - tlen) * rustsecp256k1_v0_10_0_testrand_bits(3) / 8; if (glen != 0) { *certainly_not_der = 1; } @@ -6948,7 +6779,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly } else { int tlenlen = tlen < 128 ? 0 : (tlen < 256 ? 
1 : 2); if (!der) { - int add = rustsecp256k1_v0_9_2_testrand_int(127 - tlenlen) * rustsecp256k1_v0_9_2_testrand_bits(4) * rustsecp256k1_v0_9_2_testrand_bits(4) / 256; + int add = rustsecp256k1_v0_10_0_testrand_int(127 - tlenlen) * rustsecp256k1_v0_10_0_testrand_bits(4) * rustsecp256k1_v0_10_0_testrand_bits(4) / 256; tlenlen += add; if (add != 0) { *certainly_not_der = 1; @@ -6999,13 +6830,13 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly nlen[n]--; } /* Generate remaining random bytes of number */ - rustsecp256k1_v0_9_2_testrand_bytes_test(sig + *len, nlen[n]); + rustsecp256k1_v0_10_0_testrand_bytes_test(sig + *len, nlen[n]); *len += nlen[n]; nlen[n] = 0; } /* Generate random garbage inside tuple. */ - rustsecp256k1_v0_9_2_testrand_bytes_test(sig + *len, elen); + rustsecp256k1_v0_10_0_testrand_bytes_test(sig + *len, elen); *len += elen; /* Generate end-of-contents bytes. */ @@ -7017,7 +6848,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen + glen <= 1121); /* Generate random garbage outside tuple. */ - rustsecp256k1_v0_9_2_testrand_bytes_test(sig + *len, glen); + rustsecp256k1_v0_10_0_testrand_bytes_test(sig + *len, glen); *len += glen; tlen += glen; CHECK(tlen <= 1121); @@ -7058,22 +6889,22 @@ static void run_ecdsa_der_parse(void) { /* Tests several edge cases. */ static void test_ecdsa_edge_cases(void) { int t; - rustsecp256k1_v0_9_2_ecdsa_signature sig; + rustsecp256k1_v0_10_0_ecdsa_signature sig; /* Test the case where ECDSA recomputes a point that is infinity. */ { - rustsecp256k1_v0_9_2_gej keyj; - rustsecp256k1_v0_9_2_ge key; - rustsecp256k1_v0_9_2_scalar msg; - rustsecp256k1_v0_9_2_scalar sr, ss; - rustsecp256k1_v0_9_2_scalar_set_int(&ss, 1); - rustsecp256k1_v0_9_2_scalar_negate(&ss, &ss); - rustsecp256k1_v0_9_2_scalar_inverse(&ss, &ss); - rustsecp256k1_v0_9_2_scalar_set_int(&sr, 1); - rustsecp256k1_v0_9_2_ecmult_gen(&CTX->ecmult_gen_ctx, &keyj, &sr); - rustsecp256k1_v0_9_2_ge_set_gej(&key, &keyj); + rustsecp256k1_v0_10_0_gej keyj; + rustsecp256k1_v0_10_0_ge key; + rustsecp256k1_v0_10_0_scalar msg; + rustsecp256k1_v0_10_0_scalar sr, ss; + rustsecp256k1_v0_10_0_scalar_set_int(&ss, 1); + rustsecp256k1_v0_10_0_scalar_negate(&ss, &ss); + rustsecp256k1_v0_10_0_scalar_inverse(&ss, &ss); + rustsecp256k1_v0_10_0_scalar_set_int(&sr, 1); + rustsecp256k1_v0_10_0_ecmult_gen(&CTX->ecmult_gen_ctx, &keyj, &sr); + rustsecp256k1_v0_10_0_ge_set_gej(&key, &keyj); msg = ss; - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); } /* Verify signature with r of zero fails. 
*/ @@ -7085,14 +6916,14 @@ static void test_ecdsa_edge_cases(void) { 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41 }; - rustsecp256k1_v0_9_2_ge key; - rustsecp256k1_v0_9_2_scalar msg; - rustsecp256k1_v0_9_2_scalar sr, ss; - rustsecp256k1_v0_9_2_scalar_set_int(&ss, 1); - rustsecp256k1_v0_9_2_scalar_set_int(&msg, 0); - rustsecp256k1_v0_9_2_scalar_set_int(&sr, 0); - CHECK(rustsecp256k1_v0_9_2_eckey_pubkey_parse(&key, pubkey_mods_zero, 33)); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify( &sr, &ss, &key, &msg) == 0); + rustsecp256k1_v0_10_0_ge key; + rustsecp256k1_v0_10_0_scalar msg; + rustsecp256k1_v0_10_0_scalar sr, ss; + rustsecp256k1_v0_10_0_scalar_set_int(&ss, 1); + rustsecp256k1_v0_10_0_scalar_set_int(&msg, 0); + rustsecp256k1_v0_10_0_scalar_set_int(&sr, 0); + CHECK(rustsecp256k1_v0_10_0_eckey_pubkey_parse(&key, pubkey_mods_zero, 33)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify( &sr, &ss, &key, &msg) == 0); } /* Verify signature with s of zero fails. */ @@ -7104,14 +6935,14 @@ static void test_ecdsa_edge_cases(void) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }; - rustsecp256k1_v0_9_2_ge key; - rustsecp256k1_v0_9_2_scalar msg; - rustsecp256k1_v0_9_2_scalar sr, ss; - rustsecp256k1_v0_9_2_scalar_set_int(&ss, 0); - rustsecp256k1_v0_9_2_scalar_set_int(&msg, 0); - rustsecp256k1_v0_9_2_scalar_set_int(&sr, 1); - CHECK(rustsecp256k1_v0_9_2_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); + rustsecp256k1_v0_10_0_ge key; + rustsecp256k1_v0_10_0_scalar msg; + rustsecp256k1_v0_10_0_scalar sr, ss; + rustsecp256k1_v0_10_0_scalar_set_int(&ss, 0); + rustsecp256k1_v0_10_0_scalar_set_int(&msg, 0); + rustsecp256k1_v0_10_0_scalar_set_int(&sr, 1); + CHECK(rustsecp256k1_v0_10_0_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); } /* Verify signature with message 0 passes. 
*/ @@ -7130,23 +6961,23 @@ static void test_ecdsa_edge_cases(void) { 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x43 }; - rustsecp256k1_v0_9_2_ge key; - rustsecp256k1_v0_9_2_ge key2; - rustsecp256k1_v0_9_2_scalar msg; - rustsecp256k1_v0_9_2_scalar sr, ss; - rustsecp256k1_v0_9_2_scalar_set_int(&ss, 2); - rustsecp256k1_v0_9_2_scalar_set_int(&msg, 0); - rustsecp256k1_v0_9_2_scalar_set_int(&sr, 2); - CHECK(rustsecp256k1_v0_9_2_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(rustsecp256k1_v0_9_2_eckey_pubkey_parse(&key2, pubkey2, 33)); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); - rustsecp256k1_v0_9_2_scalar_negate(&ss, &ss); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); - rustsecp256k1_v0_9_2_scalar_set_int(&ss, 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 0); + rustsecp256k1_v0_10_0_ge key; + rustsecp256k1_v0_10_0_ge key2; + rustsecp256k1_v0_10_0_scalar msg; + rustsecp256k1_v0_10_0_scalar sr, ss; + rustsecp256k1_v0_10_0_scalar_set_int(&ss, 2); + rustsecp256k1_v0_10_0_scalar_set_int(&msg, 0); + rustsecp256k1_v0_10_0_scalar_set_int(&sr, 2); + CHECK(rustsecp256k1_v0_10_0_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_10_0_eckey_pubkey_parse(&key2, pubkey2, 33)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_10_0_scalar_negate(&ss, &ss); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_10_0_scalar_set_int(&ss, 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 0); } /* Verify signature with message 1 passes. 
*/ @@ -7171,24 +7002,24 @@ static void test_ecdsa_edge_cases(void) { 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4, 0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xeb }; - rustsecp256k1_v0_9_2_ge key; - rustsecp256k1_v0_9_2_ge key2; - rustsecp256k1_v0_9_2_scalar msg; - rustsecp256k1_v0_9_2_scalar sr, ss; - rustsecp256k1_v0_9_2_scalar_set_int(&ss, 1); - rustsecp256k1_v0_9_2_scalar_set_int(&msg, 1); - rustsecp256k1_v0_9_2_scalar_set_b32(&sr, csr, NULL); - CHECK(rustsecp256k1_v0_9_2_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(rustsecp256k1_v0_9_2_eckey_pubkey_parse(&key2, pubkey2, 33)); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); - rustsecp256k1_v0_9_2_scalar_negate(&ss, &ss); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); - rustsecp256k1_v0_9_2_scalar_set_int(&ss, 2); - rustsecp256k1_v0_9_2_scalar_inverse_var(&ss, &ss); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 0); + rustsecp256k1_v0_10_0_ge key; + rustsecp256k1_v0_10_0_ge key2; + rustsecp256k1_v0_10_0_scalar msg; + rustsecp256k1_v0_10_0_scalar sr, ss; + rustsecp256k1_v0_10_0_scalar_set_int(&ss, 1); + rustsecp256k1_v0_10_0_scalar_set_int(&msg, 1); + rustsecp256k1_v0_10_0_scalar_set_b32(&sr, csr, NULL); + CHECK(rustsecp256k1_v0_10_0_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_10_0_eckey_pubkey_parse(&key2, pubkey2, 33)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_10_0_scalar_negate(&ss, &ss); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_10_0_scalar_set_int(&ss, 2); + rustsecp256k1_v0_10_0_scalar_inverse_var(&ss, &ss); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 0); } /* Verify signature with message -1 passes. 
*/ @@ -7206,27 +7037,26 @@ static void test_ecdsa_edge_cases(void) { 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4, 0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xee }; - rustsecp256k1_v0_9_2_ge key; - rustsecp256k1_v0_9_2_scalar msg; - rustsecp256k1_v0_9_2_scalar sr, ss; - rustsecp256k1_v0_9_2_scalar_set_int(&ss, 1); - rustsecp256k1_v0_9_2_scalar_set_int(&msg, 1); - rustsecp256k1_v0_9_2_scalar_negate(&msg, &msg); - rustsecp256k1_v0_9_2_scalar_set_b32(&sr, csr, NULL); - CHECK(rustsecp256k1_v0_9_2_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); - rustsecp256k1_v0_9_2_scalar_negate(&ss, &ss); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); - rustsecp256k1_v0_9_2_scalar_set_int(&ss, 3); - rustsecp256k1_v0_9_2_scalar_inverse_var(&ss, &ss); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); + rustsecp256k1_v0_10_0_ge key; + rustsecp256k1_v0_10_0_scalar msg; + rustsecp256k1_v0_10_0_scalar sr, ss; + rustsecp256k1_v0_10_0_scalar_set_int(&ss, 1); + rustsecp256k1_v0_10_0_scalar_set_int(&msg, 1); + rustsecp256k1_v0_10_0_scalar_negate(&msg, &msg); + rustsecp256k1_v0_10_0_scalar_set_b32(&sr, csr, NULL); + CHECK(rustsecp256k1_v0_10_0_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); + rustsecp256k1_v0_10_0_scalar_negate(&ss, &ss); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); + rustsecp256k1_v0_10_0_scalar_set_int(&ss, 3); + rustsecp256k1_v0_10_0_scalar_inverse_var(&ss, &ss); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); } /* Signature where s would be zero. */ { - rustsecp256k1_v0_9_2_pubkey pubkey; + rustsecp256k1_v0_10_0_pubkey pubkey; size_t siglen; - int32_t ecount; unsigned char signature[72]; static const unsigned char nonce[32] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -7252,72 +7082,42 @@ static void test_ecdsa_edge_cases(void) { 0xb8, 0x12, 0xe0, 0x0b, 0x81, 0x7a, 0x77, 0x62, 0x65, 0xdf, 0xdd, 0x31, 0xb9, 0x3e, 0x29, 0xa9, }; - ecount = 0; - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, counting_illegal_callback_fn, &ecount); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig, msg, key, precomputed_nonce_function, nonce) == 0); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig, msg, key, precomputed_nonce_function, nonce2) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig, msg, key, precomputed_nonce_function, nonce) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig, msg, key, precomputed_nonce_function, nonce2) == 0); msg[31] = 0xaa; - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig, msg, key, precomputed_nonce_function, nonce) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, NULL, msg, key, precomputed_nonce_function, nonce2) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig, NULL, key, precomputed_nonce_function, nonce2) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig, msg, NULL, precomputed_nonce_function, nonce2) == 0); - CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig, msg, key, precomputed_nonce_function, nonce2) == 1); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey, key) == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, NULL, msg, &pubkey) == 0); - CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &sig, NULL, &pubkey) == 0); - CHECK(ecount == 5); - 
CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &sig, msg, NULL) == 0); - CHECK(ecount == 6); - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &sig, msg, &pubkey) == 1); - CHECK(ecount == 6); - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_create(CTX, &pubkey, NULL) == 0); - CHECK(ecount == 7); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig, msg, key, precomputed_nonce_function, nonce) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_sign(CTX, NULL, msg, key, precomputed_nonce_function, nonce2)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig, NULL, key, precomputed_nonce_function, nonce2)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig, msg, NULL, precomputed_nonce_function, nonce2)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig, msg, key, precomputed_nonce_function, nonce2) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey, key) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_verify(CTX, NULL, msg, &pubkey)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &sig, NULL, &pubkey)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &sig, msg, NULL)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &sig, msg, &pubkey) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ec_pubkey_create(CTX, &pubkey, NULL)); /* That pubkeyload fails via an ARGCHECK is a little odd but makes sense because pubkeys are an opaque data type. */ - CHECK(rustsecp256k1_v0_9_2_ecdsa_verify(CTX, &sig, msg, &pubkey) == 0); - CHECK(ecount == 8); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_verify(CTX, &sig, msg, &pubkey)); siglen = 72; - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der(CTX, NULL, &siglen, &sig) == 0); - CHECK(ecount == 9); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der(CTX, signature, NULL, &sig) == 0); - CHECK(ecount == 10); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der(CTX, signature, &siglen, NULL) == 0); - CHECK(ecount == 11); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der(CTX, signature, &siglen, &sig) == 1); - CHECK(ecount == 11); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, NULL, signature, siglen) == 0); - CHECK(ecount == 12); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, NULL, siglen) == 0); - CHECK(ecount == 13); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &sig, signature, siglen) == 1); - CHECK(ecount == 13); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der(CTX, NULL, &siglen, &sig)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der(CTX, signature, NULL, &sig)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der(CTX, signature, &siglen, NULL)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der(CTX, signature, &siglen, &sig) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, NULL, signature, siglen)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, NULL, siglen)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &sig, signature, siglen) == 1); siglen = 10; /* Too little room for a signature does not fail via ARGCHECK. 
*/ - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der(CTX, signature, &siglen, &sig) == 0); - CHECK(ecount == 13); - ecount = 0; - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_normalize(CTX, NULL, NULL) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_serialize_compact(CTX, NULL, &sig) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_serialize_compact(CTX, signature, NULL) == 0); - CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_serialize_compact(CTX, signature, &sig) == 1); - CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact(CTX, NULL, signature) == 0); - CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact(CTX, &sig, NULL) == 0); - CHECK(ecount == 5); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact(CTX, &sig, signature) == 1); - CHECK(ecount == 5); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der(CTX, signature, &siglen, &sig) == 0); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_signature_normalize(CTX, NULL, NULL)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_signature_serialize_compact(CTX, NULL, &sig)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_signature_serialize_compact(CTX, signature, NULL)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_serialize_compact(CTX, signature, &sig) == 1); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact(CTX, NULL, signature)); + CHECK_ILLEGAL(CTX, rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact(CTX, &sig, NULL)); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact(CTX, &sig, signature) == 1); memset(signature, 255, 64); - CHECK(rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact(CTX, &sig, signature) == 0); - CHECK(ecount == 5); - rustsecp256k1_v0_9_2_context_set_illegal_callback(CTX, NULL, NULL); + CHECK(rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact(CTX, &sig, signature) == 0); } /* Nonce function corner cases. */ @@ -7326,43 +7126,43 @@ static void test_ecdsa_edge_cases(void) { int i; unsigned char key[32]; unsigned char msg[32]; - rustsecp256k1_v0_9_2_ecdsa_signature sig2; - rustsecp256k1_v0_9_2_scalar sr[512], ss; + rustsecp256k1_v0_10_0_ecdsa_signature sig2; + rustsecp256k1_v0_10_0_scalar sr[512], ss; const unsigned char *extra; extra = t == 0 ? NULL : zero; memset(msg, 0, 32); msg[31] = 1; /* High key results in signature failure. */ memset(key, 0xFF, 32); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig, msg, key, NULL, extra) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig, msg, key, NULL, extra) == 0); CHECK(is_empty_signature(&sig)); /* Zero key results in signature failure. */ memset(key, 0, 32); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig, msg, key, NULL, extra) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig, msg, key, NULL, extra) == 0); CHECK(is_empty_signature(&sig)); /* Nonce function failure results in signature failure. */ key[31] = 1; - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig, msg, key, nonce_function_test_fail, extra) == 0); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig, msg, key, nonce_function_test_fail, extra) == 0); CHECK(is_empty_signature(&sig)); /* The retry loop successfully makes its way to the first good value. 
*/ - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig, msg, key, nonce_function_test_retry, extra) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig, msg, key, nonce_function_test_retry, extra) == 1); CHECK(!is_empty_signature(&sig)); - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig2, msg, key, nonce_function_rfc6979, extra) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig2, msg, key, nonce_function_rfc6979, extra) == 1); CHECK(!is_empty_signature(&sig2)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); /* The default nonce function is deterministic. */ - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig2, msg, key, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); /* The default nonce function changes output with different messages. */ for(i = 0; i < 256; i++) { int j; msg[0] = i; - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig2, msg, key, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); - rustsecp256k1_v0_9_2_ecdsa_signature_load(CTX, &sr[i], &ss, &sig2); + rustsecp256k1_v0_10_0_ecdsa_signature_load(CTX, &sr[i], &ss, &sig2); for (j = 0; j < i; j++) { - CHECK(!rustsecp256k1_v0_9_2_scalar_eq(&sr[i], &sr[j])); + CHECK(!rustsecp256k1_v0_10_0_scalar_eq(&sr[i], &sr[j])); } } msg[0] = 0; @@ -7371,11 +7171,11 @@ static void test_ecdsa_edge_cases(void) { for(i = 256; i < 512; i++) { int j; key[0] = i - 256; - CHECK(rustsecp256k1_v0_9_2_ecdsa_sign(CTX, &sig2, msg, key, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_10_0_ecdsa_sign(CTX, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); - rustsecp256k1_v0_9_2_ecdsa_signature_load(CTX, &sr[i], &ss, &sig2); + rustsecp256k1_v0_10_0_ecdsa_signature_load(CTX, &sr[i], &ss, &sig2); for (j = 0; j < i; j++) { - CHECK(!rustsecp256k1_v0_9_2_scalar_eq(&sr[i], &sr[j])); + CHECK(!rustsecp256k1_v0_10_0_scalar_eq(&sr[i], &sr[j])); } } key[0] = 0; @@ -7400,12 +7200,12 @@ static void test_ecdsa_edge_cases(void) { SECP256K1_CHECKMEM_CHECK(nonce3,32); CHECK(nonce_function_rfc6979(nonce4, zeros, zeros, zeros, (void *)zeros, 0) == 1); SECP256K1_CHECKMEM_CHECK(nonce4,32); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(nonce, nonce2, 32) != 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(nonce, nonce3, 32) != 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(nonce, nonce4, 32) != 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(nonce2, nonce3, 32) != 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(nonce2, nonce4, 32) != 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(nonce3, nonce4, 32) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(nonce, nonce2, 32) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(nonce, nonce3, 32) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(nonce, nonce4, 32) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(nonce2, nonce3, 32) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(nonce2, nonce4, 32) != 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(nonce3, nonce4, 32) != 0); } @@ -7434,29 +7234,29 @@ static void run_ecdsa_edge_cases(void) { The tests check for known attacks (range checks in (r,s), arithmetic errors, malleability). 
*/ static void test_ecdsa_wycheproof(void) { - #include "wycheproof/ecdsa_rustsecp256k1_v0_9_2_sha256_bitcoin_test.h" + #include "wycheproof/ecdsa_rustsecp256k1_v0_10_0_sha256_bitcoin_test.h" int t; for (t = 0; t < SECP256K1_ECDSA_WYCHEPROOF_NUMBER_TESTVECTORS; t++) { - rustsecp256k1_v0_9_2_ecdsa_signature signature; - rustsecp256k1_v0_9_2_sha256 hasher; - rustsecp256k1_v0_9_2_pubkey pubkey; + rustsecp256k1_v0_10_0_ecdsa_signature signature; + rustsecp256k1_v0_10_0_sha256 hasher; + rustsecp256k1_v0_10_0_pubkey pubkey; const unsigned char *msg, *sig, *pk; unsigned char out[32] = {0}; int actual_verify = 0; memset(&pubkey, 0, sizeof(pubkey)); pk = &wycheproof_ecdsa_public_keys[testvectors[t].pk_offset]; - CHECK(rustsecp256k1_v0_9_2_ec_pubkey_parse(CTX, &pubkey, pk, 65) == 1); + CHECK(rustsecp256k1_v0_10_0_ec_pubkey_parse(CTX, &pubkey, pk, 65) == 1); - rustsecp256k1_v0_9_2_sha256_initialize(&hasher); + rustsecp256k1_v0_10_0_sha256_initialize(&hasher); msg = &wycheproof_ecdsa_messages[testvectors[t].msg_offset]; - rustsecp256k1_v0_9_2_sha256_write(&hasher, msg, testvectors[t].msg_len); - rustsecp256k1_v0_9_2_sha256_finalize(&hasher, out); + rustsecp256k1_v0_10_0_sha256_write(&hasher, msg, testvectors[t].msg_len); + rustsecp256k1_v0_10_0_sha256_finalize(&hasher, out); sig = &wycheproof_ecdsa_signatures[testvectors[t].sig_offset]; - if (rustsecp256k1_v0_9_2_ecdsa_signature_parse_der(CTX, &signature, sig, testvectors[t].sig_len) == 1) { - actual_verify = rustsecp256k1_v0_9_2_ecdsa_verify(CTX, (const rustsecp256k1_v0_9_2_ecdsa_signature *)&signature, out, &pubkey); + if (rustsecp256k1_v0_10_0_ecdsa_signature_parse_der(CTX, &signature, sig, testvectors[t].sig_len) == 1) { + actual_verify = rustsecp256k1_v0_10_0_ecdsa_verify(CTX, (const rustsecp256k1_v0_10_0_ecdsa_signature *)&signature, out, &pubkey); } CHECK(testvectors[t].expected_verify == actual_verify); } @@ -7487,32 +7287,32 @@ static void run_ecdsa_wycheproof(void) { # include "modules/ellswift/tests_impl.h" #endif -static void run_rustsecp256k1_v0_9_2_memczero_test(void) { +static void run_rustsecp256k1_v0_10_0_memczero_test(void) { unsigned char buf1[6] = {1, 2, 3, 4, 5, 6}; unsigned char buf2[sizeof(buf1)]; - /* rustsecp256k1_v0_9_2_memczero(..., ..., 0) is a noop. */ + /* rustsecp256k1_v0_10_0_memczero(..., ..., 0) is a noop. */ memcpy(buf2, buf1, sizeof(buf1)); - rustsecp256k1_v0_9_2_memczero(buf1, sizeof(buf1), 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); + rustsecp256k1_v0_10_0_memczero(buf1, sizeof(buf1), 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); - /* rustsecp256k1_v0_9_2_memczero(..., ..., 1) zeros the buffer. */ + /* rustsecp256k1_v0_10_0_memczero(..., ..., 1) zeros the buffer. 
*/ memset(buf2, 0, sizeof(buf2)); - rustsecp256k1_v0_9_2_memczero(buf1, sizeof(buf1) , 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); + rustsecp256k1_v0_10_0_memczero(buf1, sizeof(buf1) , 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); } -static void run_rustsecp256k1_v0_9_2_byteorder_tests(void) { +static void run_rustsecp256k1_v0_10_0_byteorder_tests(void) { { const uint32_t x = 0xFF03AB45; const unsigned char x_be[4] = {0xFF, 0x03, 0xAB, 0x45}; unsigned char buf[4]; uint32_t x_; - rustsecp256k1_v0_9_2_write_be32(buf, x); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(buf, x_be, sizeof(buf)) == 0); + rustsecp256k1_v0_10_0_write_be32(buf, x); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(buf, x_be, sizeof(buf)) == 0); - x_ = rustsecp256k1_v0_9_2_read_be32(buf); + x_ = rustsecp256k1_v0_10_0_read_be32(buf); CHECK(x == x_); } @@ -7522,10 +7322,10 @@ static void run_rustsecp256k1_v0_9_2_byteorder_tests(void) { unsigned char buf[8]; uint64_t x_; - rustsecp256k1_v0_9_2_write_be64(buf, x); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(buf, x_be, sizeof(buf)) == 0); + rustsecp256k1_v0_10_0_write_be64(buf, x); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(buf, x_be, sizeof(buf)) == 0); - x_ = rustsecp256k1_v0_9_2_read_be64(buf); + x_ = rustsecp256k1_v0_10_0_read_be64(buf); CHECK(x == x_); } } @@ -7534,145 +7334,145 @@ static void int_cmov_test(void) { int r = INT_MAX; int a = 0; - rustsecp256k1_v0_9_2_int_cmov(&r, &a, 0); + rustsecp256k1_v0_10_0_int_cmov(&r, &a, 0); CHECK(r == INT_MAX); r = 0; a = INT_MAX; - rustsecp256k1_v0_9_2_int_cmov(&r, &a, 1); + rustsecp256k1_v0_10_0_int_cmov(&r, &a, 1); CHECK(r == INT_MAX); a = 0; - rustsecp256k1_v0_9_2_int_cmov(&r, &a, 1); + rustsecp256k1_v0_10_0_int_cmov(&r, &a, 1); CHECK(r == 0); a = 1; - rustsecp256k1_v0_9_2_int_cmov(&r, &a, 1); + rustsecp256k1_v0_10_0_int_cmov(&r, &a, 1); CHECK(r == 1); r = 1; a = 0; - rustsecp256k1_v0_9_2_int_cmov(&r, &a, 0); + rustsecp256k1_v0_10_0_int_cmov(&r, &a, 0); CHECK(r == 1); } static void fe_cmov_test(void) { - static const rustsecp256k1_v0_9_2_fe zero = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0); - static const rustsecp256k1_v0_9_2_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - static const rustsecp256k1_v0_9_2_fe max = SECP256K1_FE_CONST( + static const rustsecp256k1_v0_10_0_fe zero = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0); + static const rustsecp256k1_v0_10_0_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); + static const rustsecp256k1_v0_10_0_fe max = SECP256K1_FE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - rustsecp256k1_v0_9_2_fe r = max; - rustsecp256k1_v0_9_2_fe a = zero; + rustsecp256k1_v0_10_0_fe r = max; + rustsecp256k1_v0_10_0_fe a = zero; - rustsecp256k1_v0_9_2_fe_cmov(&r, &a, 0); + rustsecp256k1_v0_10_0_fe_cmov(&r, &a, 0); CHECK(fe_identical(&r, &max)); r = zero; a = max; - rustsecp256k1_v0_9_2_fe_cmov(&r, &a, 1); + rustsecp256k1_v0_10_0_fe_cmov(&r, &a, 1); CHECK(fe_identical(&r, &max)); a = zero; - rustsecp256k1_v0_9_2_fe_cmov(&r, &a, 1); + rustsecp256k1_v0_10_0_fe_cmov(&r, &a, 1); CHECK(fe_identical(&r, &zero)); a = one; - rustsecp256k1_v0_9_2_fe_cmov(&r, &a, 1); + rustsecp256k1_v0_10_0_fe_cmov(&r, &a, 1); CHECK(fe_identical(&r, &one)); r = one; a = zero; - rustsecp256k1_v0_9_2_fe_cmov(&r, &a, 0); + rustsecp256k1_v0_10_0_fe_cmov(&r, &a, 0); CHECK(fe_identical(&r, &one)); } static void fe_storage_cmov_test(void) { - static const rustsecp256k1_v0_9_2_fe_storage zero = 
SECP256K1_FE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 0); - static const rustsecp256k1_v0_9_2_fe_storage one = SECP256K1_FE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - static const rustsecp256k1_v0_9_2_fe_storage max = SECP256K1_FE_STORAGE_CONST( + static const rustsecp256k1_v0_10_0_fe_storage zero = SECP256K1_FE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 0); + static const rustsecp256k1_v0_10_0_fe_storage one = SECP256K1_FE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 1); + static const rustsecp256k1_v0_10_0_fe_storage max = SECP256K1_FE_STORAGE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - rustsecp256k1_v0_9_2_fe_storage r = max; - rustsecp256k1_v0_9_2_fe_storage a = zero; + rustsecp256k1_v0_10_0_fe_storage r = max; + rustsecp256k1_v0_10_0_fe_storage a = zero; - rustsecp256k1_v0_9_2_fe_storage_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_10_0_fe_storage_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; - rustsecp256k1_v0_9_2_fe_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_10_0_fe_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; - rustsecp256k1_v0_9_2_fe_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &zero, sizeof(r)) == 0); + rustsecp256k1_v0_10_0_fe_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; - rustsecp256k1_v0_9_2_fe_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_10_0_fe_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; - rustsecp256k1_v0_9_2_fe_storage_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_10_0_fe_storage_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &one, sizeof(r)) == 0); } static void scalar_cmov_test(void) { - static const rustsecp256k1_v0_9_2_scalar max = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_10_0_scalar max = SECP256K1_SCALAR_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xBAAEDCE6UL, 0xAF48A03BUL, 0xBFD25E8CUL, 0xD0364140UL ); - rustsecp256k1_v0_9_2_scalar r = max; - rustsecp256k1_v0_9_2_scalar a = rustsecp256k1_v0_9_2_scalar_zero; + rustsecp256k1_v0_10_0_scalar r = max; + rustsecp256k1_v0_10_0_scalar a = rustsecp256k1_v0_10_0_scalar_zero; - rustsecp256k1_v0_9_2_scalar_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_10_0_scalar_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &max, sizeof(r)) == 0); - r = rustsecp256k1_v0_9_2_scalar_zero; a = max; - rustsecp256k1_v0_9_2_scalar_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &max, sizeof(r)) == 0); + r = rustsecp256k1_v0_10_0_scalar_zero; a = max; + rustsecp256k1_v0_10_0_scalar_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &max, sizeof(r)) == 0); - a = rustsecp256k1_v0_9_2_scalar_zero; - rustsecp256k1_v0_9_2_scalar_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &rustsecp256k1_v0_9_2_scalar_zero, sizeof(r)) == 0); + a = rustsecp256k1_v0_10_0_scalar_zero; + rustsecp256k1_v0_10_0_scalar_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &rustsecp256k1_v0_10_0_scalar_zero, sizeof(r)) == 0); - a = 
rustsecp256k1_v0_9_2_scalar_one; - rustsecp256k1_v0_9_2_scalar_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &rustsecp256k1_v0_9_2_scalar_one, sizeof(r)) == 0); + a = rustsecp256k1_v0_10_0_scalar_one; + rustsecp256k1_v0_10_0_scalar_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &rustsecp256k1_v0_10_0_scalar_one, sizeof(r)) == 0); - r = rustsecp256k1_v0_9_2_scalar_one; a = rustsecp256k1_v0_9_2_scalar_zero; - rustsecp256k1_v0_9_2_scalar_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &rustsecp256k1_v0_9_2_scalar_one, sizeof(r)) == 0); + r = rustsecp256k1_v0_10_0_scalar_one; a = rustsecp256k1_v0_10_0_scalar_zero; + rustsecp256k1_v0_10_0_scalar_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &rustsecp256k1_v0_10_0_scalar_one, sizeof(r)) == 0); } static void ge_storage_cmov_test(void) { - static const rustsecp256k1_v0_9_2_ge_storage zero = SECP256K1_GE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - static const rustsecp256k1_v0_9_2_ge_storage one = SECP256K1_GE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1); - static const rustsecp256k1_v0_9_2_ge_storage max = SECP256K1_GE_STORAGE_CONST( + static const rustsecp256k1_v0_10_0_ge_storage zero = SECP256K1_GE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + static const rustsecp256k1_v0_10_0_ge_storage one = SECP256K1_GE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1); + static const rustsecp256k1_v0_10_0_ge_storage max = SECP256K1_GE_STORAGE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - rustsecp256k1_v0_9_2_ge_storage r = max; - rustsecp256k1_v0_9_2_ge_storage a = zero; + rustsecp256k1_v0_10_0_ge_storage r = max; + rustsecp256k1_v0_10_0_ge_storage a = zero; - rustsecp256k1_v0_9_2_ge_storage_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_10_0_ge_storage_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; - rustsecp256k1_v0_9_2_ge_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_10_0_ge_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; - rustsecp256k1_v0_9_2_ge_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &zero, sizeof(r)) == 0); + rustsecp256k1_v0_10_0_ge_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; - rustsecp256k1_v0_9_2_ge_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_10_0_ge_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; - rustsecp256k1_v0_9_2_ge_storage_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_9_2_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_10_0_ge_storage_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_10_0_memcmp_var(&r, &one, sizeof(r)) == 0); } static void run_cmov_tests(void) { @@ -7711,28 +7511,28 @@ int main(int argc, char **argv) { run_xoshiro256pp_tests(); /* find random seed */ - rustsecp256k1_v0_9_2_testrand_init(argc > 2 ? argv[2] : NULL); + rustsecp256k1_v0_10_0_testrand_init(argc > 2 ? 
argv[2] : NULL); /*** Setup test environment ***/ /* Create a global context available to all tests */ - CTX = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE); + CTX = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE); /* Randomize the context only with probability 15/16 to make sure we test without context randomization from time to time. TODO Reconsider this when recalibrating the tests. */ - if (rustsecp256k1_v0_9_2_testrand_bits(4)) { + if (rustsecp256k1_v0_10_0_testrand_bits(4)) { unsigned char rand32[32]; - rustsecp256k1_v0_9_2_testrand256(rand32); - CHECK(rustsecp256k1_v0_9_2_context_randomize(CTX, rand32)); + rustsecp256k1_v0_10_0_testrand256(rand32); + CHECK(rustsecp256k1_v0_10_0_context_randomize(CTX, rand32)); } - /* Make a writable copy of rustsecp256k1_v0_9_2_context_static in order to test the effect of API functions + /* Make a writable copy of rustsecp256k1_v0_10_0_context_static in order to test the effect of API functions that write to the context. The API does not support cloning the static context, so we use memcpy instead. The user is not supposed to copy a context but we should still ensure that the API functions handle copies of the static context gracefully. */ - STATIC_CTX = malloc(sizeof(*rustsecp256k1_v0_9_2_context_static)); + STATIC_CTX = malloc(sizeof(*rustsecp256k1_v0_10_0_context_static)); CHECK(STATIC_CTX != NULL); - memcpy(STATIC_CTX, rustsecp256k1_v0_9_2_context_static, sizeof(rustsecp256k1_v0_9_2_context)); - CHECK(!rustsecp256k1_v0_9_2_context_is_proper(STATIC_CTX)); + memcpy(STATIC_CTX, rustsecp256k1_v0_10_0_context_static, sizeof(rustsecp256k1_v0_10_0_context)); + CHECK(!rustsecp256k1_v0_10_0_context_is_proper(STATIC_CTX)); /*** Run actual tests ***/ @@ -7836,16 +7636,16 @@ int main(int argc, char **argv) { #endif /* util tests */ - run_rustsecp256k1_v0_9_2_memczero_test(); - run_rustsecp256k1_v0_9_2_byteorder_tests(); + run_rustsecp256k1_v0_10_0_memczero_test(); + run_rustsecp256k1_v0_10_0_byteorder_tests(); run_cmov_tests(); /*** Tear down test environment ***/ free(STATIC_CTX); - rustsecp256k1_v0_9_2_context_destroy(CTX); + rustsecp256k1_v0_10_0_context_destroy(CTX); - rustsecp256k1_v0_9_2_testrand_finish(); + rustsecp256k1_v0_10_0_testrand_finish(); printf("no problems found\n"); return 0; diff --git a/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c b/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c index 250d4a1b5..9f4526499 100644 --- a/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c +++ b/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c @@ -28,61 +28,11 @@ #include "testrand_impl.h" #include "ecmult_compute_table_impl.h" #include "ecmult_gen_compute_table_impl.h" +#include "testutil.h" #include "util.h" static int count = 2; -/** stolen from tests.c */ -static void ge_equals_ge(const rustsecp256k1_v0_9_2_ge *a, const rustsecp256k1_v0_9_2_ge *b) { - CHECK(a->infinity == b->infinity); - if (a->infinity) { - return; - } - CHECK(rustsecp256k1_v0_9_2_fe_equal(&a->x, &b->x)); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&a->y, &b->y)); -} - -static void ge_equals_gej(const rustsecp256k1_v0_9_2_ge *a, const rustsecp256k1_v0_9_2_gej *b) { - rustsecp256k1_v0_9_2_fe z2s; - rustsecp256k1_v0_9_2_fe u1, u2, s1, s2; - CHECK(a->infinity == b->infinity); - if (a->infinity) { - return; - } - /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. 
*/ - rustsecp256k1_v0_9_2_fe_sqr(&z2s, &b->z); - rustsecp256k1_v0_9_2_fe_mul(&u1, &a->x, &z2s); - u2 = b->x; - rustsecp256k1_v0_9_2_fe_mul(&s1, &a->y, &z2s); rustsecp256k1_v0_9_2_fe_mul(&s1, &s1, &b->z); - s2 = b->y; - CHECK(rustsecp256k1_v0_9_2_fe_equal(&u1, &u2)); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&s1, &s2)); -} - -static void random_fe(rustsecp256k1_v0_9_2_fe *x) { - unsigned char bin[32]; - do { - rustsecp256k1_v0_9_2_testrand256(bin); - if (rustsecp256k1_v0_9_2_fe_set_b32_limit(x, bin)) { - return; - } - } while(1); -} - -static void random_fe_non_zero(rustsecp256k1_v0_9_2_fe *nz) { - int tries = 10; - while (--tries >= 0) { - random_fe(nz); - rustsecp256k1_v0_9_2_fe_normalize(nz); - if (!rustsecp256k1_v0_9_2_fe_is_zero(nz)) { - break; - } - } - /* Infinitesimal probability of spurious failure here */ - CHECK(tries >= 0); -} -/** END stolen from tests.c */ - static uint32_t num_cores = 1; static uint32_t this_core = 0; @@ -92,10 +42,10 @@ SECP256K1_INLINE static int skip_section(uint64_t* iter) { return ((((uint32_t)*iter ^ (*iter >> 32)) * num_cores) >> 32) != this_core; } -static int rustsecp256k1_v0_9_2_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, +static int rustsecp256k1_v0_10_0_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int attempt) { - rustsecp256k1_v0_9_2_scalar s; + rustsecp256k1_v0_10_0_scalar s; int *idata = data; (void)msg32; (void)key32; @@ -107,95 +57,94 @@ static int rustsecp256k1_v0_9_2_nonce_function_smallint(unsigned char *nonce32, if (attempt > 0) { *idata = (*idata + 1) % EXHAUSTIVE_TEST_ORDER; } - rustsecp256k1_v0_9_2_scalar_set_int(&s, *idata); - rustsecp256k1_v0_9_2_scalar_get_b32(nonce32, &s); + rustsecp256k1_v0_10_0_scalar_set_int(&s, *idata); + rustsecp256k1_v0_10_0_scalar_get_b32(nonce32, &s); return 1; } -static void test_exhaustive_endomorphism(const rustsecp256k1_v0_9_2_ge *group) { +static void test_exhaustive_endomorphism(const rustsecp256k1_v0_10_0_ge *group) { int i; for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_9_2_ge res; - rustsecp256k1_v0_9_2_ge_mul_lambda(&res, &group[i]); - ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res); + rustsecp256k1_v0_10_0_ge res; + rustsecp256k1_v0_10_0_ge_mul_lambda(&res, &group[i]); + CHECK(rustsecp256k1_v0_10_0_ge_eq_var(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res)); } } -static void test_exhaustive_addition(const rustsecp256k1_v0_9_2_ge *group, const rustsecp256k1_v0_9_2_gej *groupj) { +static void test_exhaustive_addition(const rustsecp256k1_v0_10_0_ge *group, const rustsecp256k1_v0_10_0_gej *groupj) { int i, j; uint64_t iter = 0; /* Sanity-check (and check infinity functions) */ - CHECK(rustsecp256k1_v0_9_2_ge_is_infinity(&group[0])); - CHECK(rustsecp256k1_v0_9_2_gej_is_infinity(&groupj[0])); + CHECK(rustsecp256k1_v0_10_0_ge_is_infinity(&group[0])); + CHECK(rustsecp256k1_v0_10_0_gej_is_infinity(&groupj[0])); for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - CHECK(!rustsecp256k1_v0_9_2_ge_is_infinity(&group[i])); - CHECK(!rustsecp256k1_v0_9_2_gej_is_infinity(&groupj[i])); + CHECK(!rustsecp256k1_v0_10_0_ge_is_infinity(&group[i])); + CHECK(!rustsecp256k1_v0_10_0_gej_is_infinity(&groupj[i])); } /* Check all addition formulae */ for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { - rustsecp256k1_v0_9_2_fe fe_inv; + rustsecp256k1_v0_10_0_fe fe_inv; if (skip_section(&iter)) continue; - 
rustsecp256k1_v0_9_2_fe_inv(&fe_inv, &groupj[j].z); + rustsecp256k1_v0_10_0_fe_inv(&fe_inv, &groupj[j].z); for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_9_2_ge zless_gej; - rustsecp256k1_v0_9_2_gej tmp; + rustsecp256k1_v0_10_0_ge zless_gej; + rustsecp256k1_v0_10_0_gej tmp; /* add_var */ - rustsecp256k1_v0_9_2_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL); - ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); + rustsecp256k1_v0_10_0_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&tmp, &group[(i + j) % EXHAUSTIVE_TEST_ORDER])); /* add_ge */ if (j > 0) { - rustsecp256k1_v0_9_2_gej_add_ge(&tmp, &groupj[i], &group[j]); - ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); + rustsecp256k1_v0_10_0_gej_add_ge(&tmp, &groupj[i], &group[j]); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&tmp, &group[(i + j) % EXHAUSTIVE_TEST_ORDER])); } /* add_ge_var */ - rustsecp256k1_v0_9_2_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL); - ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); + rustsecp256k1_v0_10_0_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&tmp, &group[(i + j) % EXHAUSTIVE_TEST_ORDER])); /* add_zinv_var */ zless_gej.infinity = groupj[j].infinity; zless_gej.x = groupj[j].x; zless_gej.y = groupj[j].y; - rustsecp256k1_v0_9_2_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv); - ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); + rustsecp256k1_v0_10_0_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&tmp, &group[(i + j) % EXHAUSTIVE_TEST_ORDER])); } } /* Check doubling */ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_9_2_gej tmp; - rustsecp256k1_v0_9_2_gej_double(&tmp, &groupj[i]); - ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp); - rustsecp256k1_v0_9_2_gej_double_var(&tmp, &groupj[i], NULL); - ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp); + rustsecp256k1_v0_10_0_gej tmp; + rustsecp256k1_v0_10_0_gej_double(&tmp, &groupj[i]); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&tmp, &group[(2 * i) % EXHAUSTIVE_TEST_ORDER])); + rustsecp256k1_v0_10_0_gej_double_var(&tmp, &groupj[i], NULL); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&tmp, &group[(2 * i) % EXHAUSTIVE_TEST_ORDER])); } /* Check negation */ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_9_2_ge tmp; - rustsecp256k1_v0_9_2_gej tmpj; - rustsecp256k1_v0_9_2_ge_neg(&tmp, &group[i]); - ge_equals_ge(&group[EXHAUSTIVE_TEST_ORDER - i], &tmp); - rustsecp256k1_v0_9_2_gej_neg(&tmpj, &groupj[i]); - ge_equals_gej(&group[EXHAUSTIVE_TEST_ORDER - i], &tmpj); + rustsecp256k1_v0_10_0_ge tmp; + rustsecp256k1_v0_10_0_gej tmpj; + rustsecp256k1_v0_10_0_ge_neg(&tmp, &group[i]); + CHECK(rustsecp256k1_v0_10_0_ge_eq_var(&tmp, &group[EXHAUSTIVE_TEST_ORDER - i])); + rustsecp256k1_v0_10_0_gej_neg(&tmpj, &groupj[i]); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&tmpj, &group[EXHAUSTIVE_TEST_ORDER - i])); } } -static void test_exhaustive_ecmult(const rustsecp256k1_v0_9_2_ge *group, const rustsecp256k1_v0_9_2_gej *groupj) { +static void test_exhaustive_ecmult(const rustsecp256k1_v0_10_0_ge *group, const rustsecp256k1_v0_10_0_gej *groupj) { int i, j, r_log; uint64_t iter = 0; for (r_log = 1; r_log < EXHAUSTIVE_TEST_ORDER; r_log++) { for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { if (skip_section(&iter)) continue; for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_9_2_gej tmp; - 
rustsecp256k1_v0_9_2_scalar na, ng; - rustsecp256k1_v0_9_2_scalar_set_int(&na, i); - rustsecp256k1_v0_9_2_scalar_set_int(&ng, j); - - rustsecp256k1_v0_9_2_ecmult(&tmp, &groupj[r_log], &na, &ng); - ge_equals_gej(&group[(i * r_log + j) % EXHAUSTIVE_TEST_ORDER], &tmp); + rustsecp256k1_v0_10_0_gej tmp; + rustsecp256k1_v0_10_0_scalar na, ng; + rustsecp256k1_v0_10_0_scalar_set_int(&na, i); + rustsecp256k1_v0_10_0_scalar_set_int(&ng, j); + rustsecp256k1_v0_10_0_ecmult(&tmp, &groupj[r_log], &na, &ng); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&tmp, &group[(i * r_log + j) % EXHAUSTIVE_TEST_ORDER])); } } } @@ -203,141 +152,141 @@ static void test_exhaustive_ecmult(const rustsecp256k1_v0_9_2_ge *group, const r for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { int ret; - rustsecp256k1_v0_9_2_gej tmp; - rustsecp256k1_v0_9_2_fe xn, xd, tmpf; - rustsecp256k1_v0_9_2_scalar ng; + rustsecp256k1_v0_10_0_gej tmp; + rustsecp256k1_v0_10_0_fe xn, xd, tmpf; + rustsecp256k1_v0_10_0_scalar ng; if (skip_section(&iter)) continue; - rustsecp256k1_v0_9_2_scalar_set_int(&ng, j); + rustsecp256k1_v0_10_0_scalar_set_int(&ng, j); - /* Test rustsecp256k1_v0_9_2_ecmult_const. */ - rustsecp256k1_v0_9_2_ecmult_const(&tmp, &group[i], &ng); - ge_equals_gej(&group[(i * j) % EXHAUSTIVE_TEST_ORDER], &tmp); + /* Test rustsecp256k1_v0_10_0_ecmult_const. */ + rustsecp256k1_v0_10_0_ecmult_const(&tmp, &group[i], &ng); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&tmp, &group[(i * j) % EXHAUSTIVE_TEST_ORDER])); if (i != 0 && j != 0) { - /* Test rustsecp256k1_v0_9_2_ecmult_const_xonly with all curve X coordinates, and xd=NULL. */ - ret = rustsecp256k1_v0_9_2_ecmult_const_xonly(&tmpf, &group[i].x, NULL, &ng, 0); + /* Test rustsecp256k1_v0_10_0_ecmult_const_xonly with all curve X coordinates, and xd=NULL. */ + ret = rustsecp256k1_v0_10_0_ecmult_const_xonly(&tmpf, &group[i].x, NULL, &ng, 0); CHECK(ret); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&tmpf, &group[(i * j) % EXHAUSTIVE_TEST_ORDER].x)); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&tmpf, &group[(i * j) % EXHAUSTIVE_TEST_ORDER].x)); - /* Test rustsecp256k1_v0_9_2_ecmult_const_xonly with all curve X coordinates, with random xd. */ + /* Test rustsecp256k1_v0_10_0_ecmult_const_xonly with all curve X coordinates, with random xd. 
*/ random_fe_non_zero(&xd); - rustsecp256k1_v0_9_2_fe_mul(&xn, &xd, &group[i].x); - ret = rustsecp256k1_v0_9_2_ecmult_const_xonly(&tmpf, &xn, &xd, &ng, 0); + rustsecp256k1_v0_10_0_fe_mul(&xn, &xd, &group[i].x); + ret = rustsecp256k1_v0_10_0_ecmult_const_xonly(&tmpf, &xn, &xd, &ng, 0); CHECK(ret); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&tmpf, &group[(i * j) % EXHAUSTIVE_TEST_ORDER].x)); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&tmpf, &group[(i * j) % EXHAUSTIVE_TEST_ORDER].x)); } } } } typedef struct { - rustsecp256k1_v0_9_2_scalar sc[2]; - rustsecp256k1_v0_9_2_ge pt[2]; + rustsecp256k1_v0_10_0_scalar sc[2]; + rustsecp256k1_v0_10_0_ge pt[2]; } ecmult_multi_data; -static int ecmult_multi_callback(rustsecp256k1_v0_9_2_scalar *sc, rustsecp256k1_v0_9_2_ge *pt, size_t idx, void *cbdata) { +static int ecmult_multi_callback(rustsecp256k1_v0_10_0_scalar *sc, rustsecp256k1_v0_10_0_ge *pt, size_t idx, void *cbdata) { ecmult_multi_data *data = (ecmult_multi_data*) cbdata; *sc = data->sc[idx]; *pt = data->pt[idx]; return 1; } -static void test_exhaustive_ecmult_multi(const rustsecp256k1_v0_9_2_context *ctx, const rustsecp256k1_v0_9_2_ge *group) { +static void test_exhaustive_ecmult_multi(const rustsecp256k1_v0_10_0_context *ctx, const rustsecp256k1_v0_10_0_ge *group) { int i, j, k, x, y; uint64_t iter = 0; - rustsecp256k1_v0_9_2_scratch *scratch = rustsecp256k1_v0_9_2_scratch_create(&ctx->error_callback, 4096); + rustsecp256k1_v0_10_0_scratch *scratch = rustsecp256k1_v0_10_0_scratch_create(&ctx->error_callback, 4096); for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) { for (x = 0; x < EXHAUSTIVE_TEST_ORDER; x++) { if (skip_section(&iter)) continue; for (y = 0; y < EXHAUSTIVE_TEST_ORDER; y++) { - rustsecp256k1_v0_9_2_gej tmp; - rustsecp256k1_v0_9_2_scalar g_sc; + rustsecp256k1_v0_10_0_gej tmp; + rustsecp256k1_v0_10_0_scalar g_sc; ecmult_multi_data data; - rustsecp256k1_v0_9_2_scalar_set_int(&data.sc[0], i); - rustsecp256k1_v0_9_2_scalar_set_int(&data.sc[1], j); - rustsecp256k1_v0_9_2_scalar_set_int(&g_sc, k); + rustsecp256k1_v0_10_0_scalar_set_int(&data.sc[0], i); + rustsecp256k1_v0_10_0_scalar_set_int(&data.sc[1], j); + rustsecp256k1_v0_10_0_scalar_set_int(&g_sc, k); data.pt[0] = group[x]; data.pt[1] = group[y]; - rustsecp256k1_v0_9_2_ecmult_multi_var(&ctx->error_callback, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2); - ge_equals_gej(&group[(i * x + j * y + k) % EXHAUSTIVE_TEST_ORDER], &tmp); + rustsecp256k1_v0_10_0_ecmult_multi_var(&ctx->error_callback, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2); + CHECK(rustsecp256k1_v0_10_0_gej_eq_ge_var(&tmp, &group[(i * x + j * y + k) % EXHAUSTIVE_TEST_ORDER])); } } } } } - rustsecp256k1_v0_9_2_scratch_destroy(&ctx->error_callback, scratch); + rustsecp256k1_v0_10_0_scratch_destroy(&ctx->error_callback, scratch); } -static void r_from_k(rustsecp256k1_v0_9_2_scalar *r, const rustsecp256k1_v0_9_2_ge *group, int k, int* overflow) { - rustsecp256k1_v0_9_2_fe x; +static void r_from_k(rustsecp256k1_v0_10_0_scalar *r, const rustsecp256k1_v0_10_0_ge *group, int k, int* overflow) { + rustsecp256k1_v0_10_0_fe x; unsigned char x_bin[32]; k %= EXHAUSTIVE_TEST_ORDER; x = group[k].x; - rustsecp256k1_v0_9_2_fe_normalize(&x); - rustsecp256k1_v0_9_2_fe_get_b32(x_bin, &x); - rustsecp256k1_v0_9_2_scalar_set_b32(r, x_bin, overflow); + rustsecp256k1_v0_10_0_fe_normalize(&x); + rustsecp256k1_v0_10_0_fe_get_b32(x_bin, &x); + rustsecp256k1_v0_10_0_scalar_set_b32(r, x_bin, overflow); } 
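(Aside for readers skimming these hunks: r_from_k above reduces the x-coordinate of k*G modulo the group order, which is exactly the r component an ECDSA signature commits to, and the verify test that follows brute-forces the defining relation s*k = m + r*x (mod n) over every candidate nonce k. A minimal Rust sketch of that algebra over a toy order of 13, with a hypothetical stand-in for r_from_k — the names ORDER, verifies and r_of are ours, not the library's:

    const ORDER: u64 = 13; // stands in for EXHAUSTIVE_TEST_ORDER

    // (r, s) verifies for message m under secret key x iff some nonce k
    // satisfies r_of(k) == r and s*k == m + r*x (mod ORDER).
    fn verifies(r: u64, s: u64, m: u64, x: u64, r_of: impl Fn(u64) -> u64) -> bool {
        (0..ORDER).any(|k| r_of(k) == r && (s * k) % ORDER == (m + r * x) % ORDER)
    }

    fn main() {
        // Hypothetical r mapping; the real test derives r from the group table.
        let r_of = |k: u64| (5 * k + 1) % ORDER;
        // With x = 2 and nonce k = 3: r = r_of(3) = 3, and s = 5, m = 9 satisfy
        // s*k = 15 = 2 (mod 13) and m + r*x = 9 + 6 = 15 = 2 (mod 13).
        assert!(verifies(3, 5, 9, 2, r_of));
    }

The real test below additionally applies the low-s rule before comparing its ground truth against the library's verifier.)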
-static void test_exhaustive_verify(const rustsecp256k1_v0_9_2_context *ctx, const rustsecp256k1_v0_9_2_ge *group) { +static void test_exhaustive_verify(const rustsecp256k1_v0_10_0_context *ctx, const rustsecp256k1_v0_10_0_ge *group) { int s, r, msg, key; uint64_t iter = 0; for (s = 1; s < EXHAUSTIVE_TEST_ORDER; s++) { for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) { for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) { for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) { - rustsecp256k1_v0_9_2_ge nonconst_ge; - rustsecp256k1_v0_9_2_ecdsa_signature sig; - rustsecp256k1_v0_9_2_pubkey pk; - rustsecp256k1_v0_9_2_scalar sk_s, msg_s, r_s, s_s; - rustsecp256k1_v0_9_2_scalar s_times_k_s, msg_plus_r_times_sk_s; + rustsecp256k1_v0_10_0_ge nonconst_ge; + rustsecp256k1_v0_10_0_ecdsa_signature sig; + rustsecp256k1_v0_10_0_pubkey pk; + rustsecp256k1_v0_10_0_scalar sk_s, msg_s, r_s, s_s; + rustsecp256k1_v0_10_0_scalar s_times_k_s, msg_plus_r_times_sk_s; int k, should_verify; unsigned char msg32[32]; if (skip_section(&iter)) continue; - rustsecp256k1_v0_9_2_scalar_set_int(&s_s, s); - rustsecp256k1_v0_9_2_scalar_set_int(&r_s, r); - rustsecp256k1_v0_9_2_scalar_set_int(&msg_s, msg); - rustsecp256k1_v0_9_2_scalar_set_int(&sk_s, key); + rustsecp256k1_v0_10_0_scalar_set_int(&s_s, s); + rustsecp256k1_v0_10_0_scalar_set_int(&r_s, r); + rustsecp256k1_v0_10_0_scalar_set_int(&msg_s, msg); + rustsecp256k1_v0_10_0_scalar_set_int(&sk_s, key); /* Verify by hand */ /* Run through every k value that gives us this r and check that *one* works. * Note there could be none, there could be multiple, ECDSA is weird. */ should_verify = 0; for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) { - rustsecp256k1_v0_9_2_scalar check_x_s; + rustsecp256k1_v0_10_0_scalar check_x_s; r_from_k(&check_x_s, group, k, NULL); if (r_s == check_x_s) { - rustsecp256k1_v0_9_2_scalar_set_int(&s_times_k_s, k); - rustsecp256k1_v0_9_2_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); - rustsecp256k1_v0_9_2_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); - rustsecp256k1_v0_9_2_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); - should_verify |= rustsecp256k1_v0_9_2_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); + rustsecp256k1_v0_10_0_scalar_set_int(&s_times_k_s, k); + rustsecp256k1_v0_10_0_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); + rustsecp256k1_v0_10_0_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); + rustsecp256k1_v0_10_0_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); + should_verify |= rustsecp256k1_v0_10_0_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); } } /* nb we have a "high s" rule */ - should_verify &= !rustsecp256k1_v0_9_2_scalar_is_high(&s_s); + should_verify &= !rustsecp256k1_v0_10_0_scalar_is_high(&s_s); /* Verify by calling verify */ - rustsecp256k1_v0_9_2_ecdsa_signature_save(&sig, &r_s, &s_s); + rustsecp256k1_v0_10_0_ecdsa_signature_save(&sig, &r_s, &s_s); memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge)); - rustsecp256k1_v0_9_2_pubkey_save(&pk, &nonconst_ge); - rustsecp256k1_v0_9_2_scalar_get_b32(msg32, &msg_s); + rustsecp256k1_v0_10_0_pubkey_save(&pk, &nonconst_ge); + rustsecp256k1_v0_10_0_scalar_get_b32(msg32, &msg_s); CHECK(should_verify == - rustsecp256k1_v0_9_2_ecdsa_verify(ctx, &sig, msg32, &pk)); + rustsecp256k1_v0_10_0_ecdsa_verify(ctx, &sig, msg32, &pk)); } } } } } -static void test_exhaustive_sign(const rustsecp256k1_v0_9_2_context *ctx, const rustsecp256k1_v0_9_2_ge *group) { +static void test_exhaustive_sign(const rustsecp256k1_v0_10_0_context *ctx, const rustsecp256k1_v0_10_0_ge 
*group) { int i, j, k; uint64_t iter = 0; @@ -348,18 +297,18 @@ static void test_exhaustive_sign(const rustsecp256k1_v0_9_2_context *ctx, const for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */ const int starting_k = k; int ret; - rustsecp256k1_v0_9_2_ecdsa_signature sig; - rustsecp256k1_v0_9_2_scalar sk, msg, r, s, expected_r; + rustsecp256k1_v0_10_0_ecdsa_signature sig; + rustsecp256k1_v0_10_0_scalar sk, msg, r, s, expected_r; unsigned char sk32[32], msg32[32]; - rustsecp256k1_v0_9_2_scalar_set_int(&msg, i); - rustsecp256k1_v0_9_2_scalar_set_int(&sk, j); - rustsecp256k1_v0_9_2_scalar_get_b32(sk32, &sk); - rustsecp256k1_v0_9_2_scalar_get_b32(msg32, &msg); + rustsecp256k1_v0_10_0_scalar_set_int(&msg, i); + rustsecp256k1_v0_10_0_scalar_set_int(&sk, j); + rustsecp256k1_v0_10_0_scalar_get_b32(sk32, &sk); + rustsecp256k1_v0_10_0_scalar_get_b32(msg32, &msg); - ret = rustsecp256k1_v0_9_2_ecdsa_sign(ctx, &sig, msg32, sk32, rustsecp256k1_v0_9_2_nonce_function_smallint, &k); + ret = rustsecp256k1_v0_10_0_ecdsa_sign(ctx, &sig, msg32, sk32, rustsecp256k1_v0_10_0_nonce_function_smallint, &k); CHECK(ret == 1); - rustsecp256k1_v0_9_2_ecdsa_signature_load(ctx, &r, &s, &sig); + rustsecp256k1_v0_10_0_ecdsa_signature_load(ctx, &r, &s, &sig); /* Note that we compute expected_r *after* signing -- this is important * because our nonce-computing function might change k during * signing. */ @@ -404,10 +353,10 @@ static void test_exhaustive_sign(const rustsecp256k1_v0_9_2_context *ctx, const int main(int argc, char** argv) { int i; - rustsecp256k1_v0_9_2_gej groupj[EXHAUSTIVE_TEST_ORDER]; - rustsecp256k1_v0_9_2_ge group[EXHAUSTIVE_TEST_ORDER]; + rustsecp256k1_v0_10_0_gej groupj[EXHAUSTIVE_TEST_ORDER]; + rustsecp256k1_v0_10_0_ge group[EXHAUSTIVE_TEST_ORDER]; unsigned char rand32[32]; - rustsecp256k1_v0_9_2_context *ctx; + rustsecp256k1_v0_10_0_context *ctx; /* Disable buffering for stdout to improve reliability of getting * diagnostic information. Happens right at the start of main because @@ -426,7 +375,7 @@ int main(int argc, char** argv) { printf("test count = %i\n", count); /* find random seed */ - rustsecp256k1_v0_9_2_testrand_init(argc > 2 ?
argv[2] : NULL); /* set up split processing */ if (argc > 4) { @@ -440,43 +389,43 @@ int main(int argc, char** argv) { } /* Recreate the ecmult{,_gen} tables using the right generator (as selected via EXHAUSTIVE_TEST_ORDER) */ - rustsecp256k1_v0_9_2_ecmult_gen_compute_table(&rustsecp256k1_v0_9_2_ecmult_gen_prec_table[0][0], &rustsecp256k1_v0_9_2_ge_const_g, ECMULT_GEN_PREC_BITS); - rustsecp256k1_v0_9_2_ecmult_compute_two_tables(rustsecp256k1_v0_9_2_pre_g, rustsecp256k1_v0_9_2_pre_g_128, WINDOW_G, &rustsecp256k1_v0_9_2_ge_const_g); + rustsecp256k1_v0_10_0_ecmult_gen_compute_table(&rustsecp256k1_v0_10_0_ecmult_gen_prec_table[0][0], &rustsecp256k1_v0_10_0_ge_const_g, ECMULT_GEN_PREC_BITS); + rustsecp256k1_v0_10_0_ecmult_compute_two_tables(rustsecp256k1_v0_10_0_pre_g, rustsecp256k1_v0_10_0_pre_g_128, WINDOW_G, &rustsecp256k1_v0_10_0_ge_const_g); while (count--) { /* Build context */ - ctx = rustsecp256k1_v0_9_2_context_create(SECP256K1_CONTEXT_NONE); - rustsecp256k1_v0_9_2_testrand256(rand32); - CHECK(rustsecp256k1_v0_9_2_context_randomize(ctx, rand32)); + ctx = rustsecp256k1_v0_10_0_context_create(SECP256K1_CONTEXT_NONE); + rustsecp256k1_v0_10_0_testrand256(rand32); + CHECK(rustsecp256k1_v0_10_0_context_randomize(ctx, rand32)); /* Generate the entire group */ - rustsecp256k1_v0_9_2_gej_set_infinity(&groupj[0]); - rustsecp256k1_v0_9_2_ge_set_gej(&group[0], &groupj[0]); + rustsecp256k1_v0_10_0_gej_set_infinity(&groupj[0]); + rustsecp256k1_v0_10_0_ge_set_gej(&group[0], &groupj[0]); for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_9_2_gej_add_ge(&groupj[i], &groupj[i - 1], &rustsecp256k1_v0_9_2_ge_const_g); - rustsecp256k1_v0_9_2_ge_set_gej(&group[i], &groupj[i]); + rustsecp256k1_v0_10_0_gej_add_ge(&groupj[i], &groupj[i - 1], &rustsecp256k1_v0_10_0_ge_const_g); + rustsecp256k1_v0_10_0_ge_set_gej(&group[i], &groupj[i]); if (count != 0) { /* Set a different random z-value for each Jacobian point, except z=1 is used in the last iteration. 
*/ - rustsecp256k1_v0_9_2_fe z; + rustsecp256k1_v0_10_0_fe z; random_fe(&z); - rustsecp256k1_v0_9_2_gej_rescale(&groupj[i], &z); + rustsecp256k1_v0_10_0_gej_rescale(&groupj[i], &z); } /* Verify against ecmult_gen */ { - rustsecp256k1_v0_9_2_scalar scalar_i; - rustsecp256k1_v0_9_2_gej generatedj; - rustsecp256k1_v0_9_2_ge generated; + rustsecp256k1_v0_10_0_scalar scalar_i; + rustsecp256k1_v0_10_0_gej generatedj; + rustsecp256k1_v0_10_0_ge generated; - rustsecp256k1_v0_9_2_scalar_set_int(&scalar_i, i); - rustsecp256k1_v0_9_2_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i); - rustsecp256k1_v0_9_2_ge_set_gej(&generated, &generatedj); + rustsecp256k1_v0_10_0_scalar_set_int(&scalar_i, i); + rustsecp256k1_v0_10_0_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i); + rustsecp256k1_v0_10_0_ge_set_gej(&generated, &generatedj); CHECK(group[i].infinity == 0); CHECK(generated.infinity == 0); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&generated.x, &group[i].x)); - CHECK(rustsecp256k1_v0_9_2_fe_equal(&generated.y, &group[i].y)); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&generated.x, &group[i].x)); + CHECK(rustsecp256k1_v0_10_0_fe_equal(&generated.y, &group[i].y)); } } @@ -507,10 +456,10 @@ int main(int argc, char** argv) { #endif #endif - rustsecp256k1_v0_9_2_context_destroy(ctx); + rustsecp256k1_v0_10_0_context_destroy(ctx); } - rustsecp256k1_v0_9_2_testrand_finish(); + rustsecp256k1_v0_10_0_testrand_finish(); printf("no problems found\n"); return 0; diff --git a/secp256k1-sys/depend/secp256k1/src/testutil.h b/secp256k1-sys/depend/secp256k1/src/testutil.h new file mode 100644 index 000000000..78262ac37 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/src/testutil.h @@ -0,0 +1,29 @@ +/*********************************************************************** + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + ***********************************************************************/ + +#ifndef SECP256K1_TESTUTIL_H +#define SECP256K1_TESTUTIL_H + +#include "field.h" +#include "testrand.h" +#include "util.h" + +static void random_fe(rustsecp256k1_v0_10_0_fe *x) { + unsigned char bin[32]; + do { + rustsecp256k1_v0_10_0_testrand256(bin); + if (rustsecp256k1_v0_10_0_fe_set_b32_limit(x, bin)) { + return; + } + } while(1); +} + +static void random_fe_non_zero(rustsecp256k1_v0_10_0_fe *nz) { + do { + random_fe(nz); + } while (rustsecp256k1_v0_10_0_fe_is_zero(nz)); +} + +#endif /* SECP256K1_TESTUTIL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/util.h b/secp256k1-sys/depend/secp256k1/src/util.h index 49a650915..b4ba0b38d 100644 --- a/secp256k1-sys/depend/secp256k1/src/util.h +++ b/secp256k1-sys/depend/secp256k1/src/util.h @@ -67,35 +67,35 @@ static void print_buf_plain(const unsigned char *buf, size_t len) { typedef struct { void (*fn)(const char *text, void* data); const void* data; -} rustsecp256k1_v0_9_2_callback; +} rustsecp256k1_v0_10_0_callback; -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_callback_call(const rustsecp256k1_v0_9_2_callback * const cb, const char * const text) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_callback_call(const rustsecp256k1_v0_10_0_callback * const cb, const char * const text) { cb->fn(text, (void*)cb->data); } #ifndef USE_EXTERNAL_DEFAULT_CALLBACKS -static void rustsecp256k1_v0_9_2_default_illegal_callback_fn(const char* str, void* data) { +static void rustsecp256k1_v0_10_0_default_illegal_callback_fn(const char* str, void* data) { (void)data; 
fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str); abort(); } -static void rustsecp256k1_v0_9_2_default_error_callback_fn(const char* str, void* data) { +static void rustsecp256k1_v0_10_0_default_error_callback_fn(const char* str, void* data) { (void)data; fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str); abort(); } #else -void rustsecp256k1_v0_9_2_default_illegal_callback_fn(const char* str, void* data); -void rustsecp256k1_v0_9_2_default_error_callback_fn(const char* str, void* data); +void rustsecp256k1_v0_10_0_default_illegal_callback_fn(const char* str, void* data); +void rustsecp256k1_v0_10_0_default_error_callback_fn(const char* str, void* data); #endif -static const rustsecp256k1_v0_9_2_callback default_illegal_callback = { - rustsecp256k1_v0_9_2_default_illegal_callback_fn, +static const rustsecp256k1_v0_10_0_callback default_illegal_callback = { + rustsecp256k1_v0_10_0_default_illegal_callback_fn, NULL }; -static const rustsecp256k1_v0_9_2_callback default_error_callback = { - rustsecp256k1_v0_9_2_default_error_callback_fn, +static const rustsecp256k1_v0_10_0_callback default_error_callback = { + rustsecp256k1_v0_10_0_default_error_callback_fn, NULL }; @@ -132,19 +132,14 @@ static const rustsecp256k1_v0_9_2_callback default_error_callback = { } while(0) #endif -/* Like assert(), but when VERIFY is defined, and side-effect safe. */ -#if defined(COVERAGE) -#define VERIFY_CHECK(check) -#define VERIFY_SETUP(stmt) -#elif defined(VERIFY) +/* Like assert(), but when VERIFY is defined. */ +#if defined(VERIFY) #define VERIFY_CHECK CHECK -#define VERIFY_SETUP(stmt) do { stmt; } while(0) #else -#define VERIFY_CHECK(cond) do { (void)(cond); } while(0) -#define VERIFY_SETUP(stmt) +#define VERIFY_CHECK(cond) #endif -static SECP256K1_INLINE void *checked_malloc(const rustsecp256k1_v0_9_2_callback* cb, size_t size) { +static SECP256K1_INLINE void *checked_malloc(const rustsecp256k1_v0_10_0_callback* cb, size_t size) { (void) cb; (void) size; return NULL; @@ -193,7 +188,7 @@ static SECP256K1_INLINE void *checked_malloc(const rustsecp256k1_v0_9_2_callback #endif /* Zero memory if flag == 1. Flag must be 0 or 1. Constant time. */ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_memczero(void *s, size_t len, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_memczero(void *s, size_t len, int flag) { unsigned char *p = (unsigned char *)s; /* Access flag with a volatile-qualified lvalue. This prevents clang from figuring out (after inlining) that flag can @@ -212,7 +207,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_memczero(void *s, size_t len, * We use this to avoid possible compiler bugs with memcmp, e.g. * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95189 */ -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_memcmp_var(const void *s1, const void *s2, size_t n) { +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_memcmp_var(const void *s1, const void *s2, size_t n) { const unsigned char *p1 = s1, *p2 = s2; size_t i; @@ -226,7 +221,7 @@ static SECP256K1_INLINE int rustsecp256k1_v0_9_2_memcmp_var(const void *s1, cons } /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized and non-negative.*/ -static SECP256K1_INLINE void rustsecp256k1_v0_9_2_int_cmov(int *r, const int *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_int_cmov(int *r, const int *a, int flag) { unsigned int mask0, mask1, r_masked, a_masked; /* Access flag with a volatile-qualified lvalue. 
This prevents clang from figuring out (after inlining) that flag can @@ -281,8 +276,8 @@ static SECP256K1_INLINE void rustsecp256k1_v0_9_2_int_cmov(int *r, const int *a, /* Determine the number of trailing zero bits in a (non-zero) 32-bit x. * This function is only intended to be used as fallback for - * rustsecp256k1_v0_9_2_ctz32_var, but permits it to be tested separately. */ -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_ctz32_var_debruijn(uint32_t x) { + * rustsecp256k1_v0_10_0_ctz32_var, but permits it to be tested separately. */ +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_ctz32_var_debruijn(uint32_t x) { static const uint8_t debruijn[32] = { 0x00, 0x01, 0x02, 0x18, 0x03, 0x13, 0x06, 0x19, 0x16, 0x04, 0x14, 0x0A, 0x10, 0x07, 0x0C, 0x1A, 0x1F, 0x17, 0x12, 0x05, 0x15, 0x09, 0x0F, 0x0B, @@ -293,8 +288,8 @@ static SECP256K1_INLINE int rustsecp256k1_v0_9_2_ctz32_var_debruijn(uint32_t x) /* Determine the number of trailing zero bits in a (non-zero) 64-bit x. * This function is only intended to be used as fallback for - * rustsecp256k1_v0_9_2_ctz64_var, but permits it to be tested separately. */ -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_ctz64_var_debruijn(uint64_t x) { + * rustsecp256k1_v0_10_0_ctz64_var, but permits it to be tested separately. */ +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_ctz64_var_debruijn(uint64_t x) { static const uint8_t debruijn[64] = { 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, @@ -305,7 +300,7 @@ static SECP256K1_INLINE int rustsecp256k1_v0_9_2_ctz64_var_debruijn(uint64_t x) } /* Determine the number of trailing zero bits in a (non-zero) 32-bit x. */ -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_ctz32_var(uint32_t x) { +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_ctz32_var(uint32_t x) { VERIFY_CHECK(x != 0); #if (__has_builtin(__builtin_ctz) || SECP256K1_GNUC_PREREQ(3,4)) /* If the unsigned type is sufficient to represent the largest uint32_t, consider __builtin_ctz. */ @@ -318,12 +313,12 @@ static SECP256K1_INLINE int rustsecp256k1_v0_9_2_ctz32_var(uint32_t x) { return __builtin_ctzl(x); #else /* If no suitable CTZ builtin is available, use a (variable time) software emulation. */ - return rustsecp256k1_v0_9_2_ctz32_var_debruijn(x); + return rustsecp256k1_v0_10_0_ctz32_var_debruijn(x); #endif } /* Determine the number of trailing zero bits in a (non-zero) 64-bit x. */ -static SECP256K1_INLINE int rustsecp256k1_v0_9_2_ctz64_var(uint64_t x) { +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_ctz64_var(uint64_t x) { VERIFY_CHECK(x != 0); #if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4)) /* If the unsigned long type is sufficient to represent the largest uint64_t, consider __builtin_ctzl. */ @@ -336,12 +331,12 @@ static SECP256K1_INLINE int rustsecp256k1_v0_9_2_ctz64_var(uint64_t x) { return __builtin_ctzll(x); #else /* If no suitable CTZ builtin is available, use a (variable time) software emulation. 
*/ - return rustsecp256k1_v0_9_2_ctz64_var_debruijn(x); + return rustsecp256k1_v0_10_0_ctz64_var_debruijn(x); #endif } /* Read a uint32_t in big endian */ -SECP256K1_INLINE static uint32_t rustsecp256k1_v0_9_2_read_be32(const unsigned char* p) { +SECP256K1_INLINE static uint32_t rustsecp256k1_v0_10_0_read_be32(const unsigned char* p) { return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 | (uint32_t)p[2] << 8 | @@ -349,7 +344,7 @@ SECP256K1_INLINE static uint32_t rustsecp256k1_v0_9_2_read_be32(const unsigned c } /* Write a uint32_t in big endian */ -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_write_be32(unsigned char* p, uint32_t x) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_write_be32(unsigned char* p, uint32_t x) { p[3] = x; p[2] = x >> 8; p[1] = x >> 16; @@ -357,7 +352,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_9_2_write_be32(unsigned char* p, u } /* Read a uint64_t in big endian */ -SECP256K1_INLINE static uint64_t rustsecp256k1_v0_9_2_read_be64(const unsigned char* p) { +SECP256K1_INLINE static uint64_t rustsecp256k1_v0_10_0_read_be64(const unsigned char* p) { return (uint64_t)p[0] << 56 | (uint64_t)p[1] << 48 | (uint64_t)p[2] << 40 | @@ -369,7 +364,7 @@ SECP256K1_INLINE static uint64_t rustsecp256k1_v0_9_2_read_be64(const unsigned c } /* Write a uint64_t in big endian */ -SECP256K1_INLINE static void rustsecp256k1_v0_9_2_write_be64(unsigned char* p, uint64_t x) { +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_write_be64(unsigned char* p, uint64_t x) { p[7] = x; p[6] = x >> 8; p[5] = x >> 16; diff --git a/secp256k1-sys/depend/secp256k1/src/util.h.orig b/secp256k1-sys/depend/secp256k1/src/util.h.orig new file mode 100644 index 000000000..7a9cf796b --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/src/util.h.orig @@ -0,0 +1,380 @@ +/*********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + ***********************************************************************/ + +#ifndef SECP256K1_UTIL_H +#define SECP256K1_UTIL_H + +#include "../include/secp256k1.h" + +#include <stdlib.h> +#include <stdint.h> +#include <stdio.h> +#include <limits.h> + +#define STR_(x) #x +#define STR(x) STR_(x) +#define DEBUG_CONFIG_MSG(x) "DEBUG_CONFIG: " x +#define DEBUG_CONFIG_DEF(x) DEBUG_CONFIG_MSG(#x "=" STR(x)) + +/* Debug helper for printing arrays of unsigned char. */ +#define PRINT_BUF(buf, len) do { \ + printf("%s[%lu] = ", #buf, (unsigned long)len); \ + print_buf_plain(buf, len); \ +} while(0) + +static void print_buf_plain(const unsigned char *buf, size_t len) { + size_t i; + printf("{"); + for (i = 0; i < len; i++) { + if (i % 8 == 0) { + printf("\n "); + } else { + printf(" "); + } + printf("0x%02X,", buf[i]); + } + printf("\n}\n"); +} + +# if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) ) +# if SECP256K1_GNUC_PREREQ(2,7) +# define SECP256K1_INLINE __inline__ +# elif (defined(_MSC_VER)) +# define SECP256K1_INLINE __inline +# else +# define SECP256K1_INLINE +# endif +# else +# define SECP256K1_INLINE inline +# endif + +/** Assert statically that expr is an integer constant expression, and run stmt. + * + * Useful for example to enforce that magnitude arguments are constant.
+ */ +#define ASSERT_INT_CONST_AND_DO(expr, stmt) do { \ + switch(42) { \ + case /* ERROR: integer argument is not constant */ expr: \ + break; \ + default: ; \ + } \ + stmt; \ +} while(0) + +typedef struct { + void (*fn)(const char *text, void* data); + const void* data; +} rustsecp256k1_v0_10_0_callback; + +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_callback_call(const rustsecp256k1_v0_10_0_callback * const cb, const char * const text) { + cb->fn(text, (void*)cb->data); +} + +#ifndef USE_EXTERNAL_DEFAULT_CALLBACKS +static void rustsecp256k1_v0_10_0_default_illegal_callback_fn(const char* str, void* data) { + (void)data; + fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str); + abort(); +} +static void rustsecp256k1_v0_10_0_default_error_callback_fn(const char* str, void* data) { + (void)data; + fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str); + abort(); +} +#else +void rustsecp256k1_v0_10_0_default_illegal_callback_fn(const char* str, void* data); +void rustsecp256k1_v0_10_0_default_error_callback_fn(const char* str, void* data); +#endif + +static const rustsecp256k1_v0_10_0_callback default_illegal_callback = { + rustsecp256k1_v0_10_0_default_illegal_callback_fn, + NULL +}; + +static const rustsecp256k1_v0_10_0_callback default_error_callback = { + rustsecp256k1_v0_10_0_default_error_callback_fn, + NULL +}; + + +#ifdef DETERMINISTIC +#define TEST_FAILURE(msg) do { \ + fprintf(stderr, "%s\n", msg); \ + abort(); \ +} while(0); +#else +#define TEST_FAILURE(msg) do { \ + fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, msg); \ + abort(); \ +} while(0) +#endif + +#if SECP256K1_GNUC_PREREQ(3, 0) +#define EXPECT(x,c) __builtin_expect((x),(c)) +#else +#define EXPECT(x,c) (x) +#endif + +#ifdef DETERMINISTIC +#define CHECK(cond) do { \ + if (EXPECT(!(cond), 0)) { \ + TEST_FAILURE("test condition failed"); \ + } \ +} while(0) +#else +#define CHECK(cond) do { \ + if (EXPECT(!(cond), 0)) { \ + TEST_FAILURE("test condition failed: " #cond); \ + } \ +} while(0) +#endif + +/* Like assert(), but when VERIFY is defined. */ +#if defined(VERIFY) +#define VERIFY_CHECK CHECK +#else +#define VERIFY_CHECK(cond) +#endif + +static SECP256K1_INLINE void *checked_malloc(const rustsecp256k1_v0_10_0_callback* cb, size_t size) { + void *ret = malloc(size); + if (ret == NULL) { + rustsecp256k1_v0_10_0_callback_call(cb, "Out of memory"); + } + return ret; +} + +#if defined(__BIGGEST_ALIGNMENT__) +#define ALIGNMENT __BIGGEST_ALIGNMENT__ +#else +/* Using 16 bytes alignment because common architectures never have alignment + * requirements above 8 for any of the types we care about. In addition we + * leave some room because currently we don't care about a few bytes. */ +#define ALIGNMENT 16 +#endif + +#define ROUND_TO_ALIGN(size) ((((size) + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT) + +/* Macro for restrict, when available and not in a VERIFY build. 
*/ +#if defined(SECP256K1_BUILD) && defined(VERIFY) +# define SECP256K1_RESTRICT +#else +# if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) ) +# if SECP256K1_GNUC_PREREQ(3,0) +# define SECP256K1_RESTRICT __restrict__ +# elif (defined(_MSC_VER) && _MSC_VER >= 1400) +# define SECP256K1_RESTRICT __restrict +# else +# define SECP256K1_RESTRICT +# endif +# else +# define SECP256K1_RESTRICT restrict +# endif +#endif + +#if defined(_WIN32) +# define I64FORMAT "I64d" +# define I64uFORMAT "I64u" +#else +# define I64FORMAT "lld" +# define I64uFORMAT "llu" +#endif + +#if defined(__GNUC__) +# define SECP256K1_GNUC_EXT __extension__ +#else +# define SECP256K1_GNUC_EXT +#endif + +/* Zero memory if flag == 1. Flag must be 0 or 1. Constant time. */ +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_memczero(void *s, size_t len, int flag) { + unsigned char *p = (unsigned char *)s; + /* Access flag with a volatile-qualified lvalue. + This prevents clang from figuring out (after inlining) that flag can + take only the values 0 or 1, which leads to variable time code. */ + volatile int vflag = flag; + unsigned char mask = -(unsigned char) vflag; + while (len) { + *p &= ~mask; + p++; + len--; + } +} + +/** Semantics like memcmp. Variable-time. + * + * We use this to avoid possible compiler bugs with memcmp, e.g. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95189 + */ +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_memcmp_var(const void *s1, const void *s2, size_t n) { + const unsigned char *p1 = s1, *p2 = s2; + size_t i; + + for (i = 0; i < n; i++) { + int diff = p1[i] - p2[i]; + if (diff != 0) { + return diff; + } + } + return 0; +} + +/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized and non-negative.*/ +static SECP256K1_INLINE void rustsecp256k1_v0_10_0_int_cmov(int *r, const int *a, int flag) { + unsigned int mask0, mask1, r_masked, a_masked; + /* Access flag with a volatile-qualified lvalue. + This prevents clang from figuring out (after inlining) that flag can + take only the values 0 or 1, which leads to variable time code. */ + volatile int vflag = flag; + + /* Casting a negative int to unsigned and back to int is implementation defined behavior */ + VERIFY_CHECK(*r >= 0 && *a >= 0); + + mask0 = (unsigned int)vflag + ~0u; + mask1 = ~mask0; + r_masked = ((unsigned int)*r & mask0); + a_masked = ((unsigned int)*a & mask1); + + *r = (int)(r_masked | a_masked); +} + +#if defined(USE_FORCE_WIDEMUL_INT128_STRUCT) +/* If USE_FORCE_WIDEMUL_INT128_STRUCT is set, use int128_struct. */ +# define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_STRUCT 1 +#elif defined(USE_FORCE_WIDEMUL_INT128) +/* If USE_FORCE_WIDEMUL_INT128 is set, use int128. */ +# define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_NATIVE 1 +#elif defined(USE_FORCE_WIDEMUL_INT64) +/* If USE_FORCE_WIDEMUL_INT64 is set, use int64. */ +# define SECP256K1_WIDEMUL_INT64 1 +#elif defined(UINT128_MAX) || defined(__SIZEOF_INT128__) +/* If a native 128-bit integer type exists, use int128. */ +# define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_NATIVE 1 +#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) +/* On 64-bit MSVC targets (x86_64 and arm64), use int128_struct + * (which has special logic to implement using intrinsics on those systems).
*/ +# define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_STRUCT 1 +#elif SIZE_MAX > 0xffffffff +/* Systems with 64-bit pointers (and thus registers) very likely benefit from + * using 64-bit based arithmetic (even if we need to fall back to 32x32->64 based + * multiplication logic). */ +# define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_STRUCT 1 +#else +/* Lastly, fall back to int64 based arithmetic. */ +# define SECP256K1_WIDEMUL_INT64 1 +#endif + +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + +/* Determine the number of trailing zero bits in a (non-zero) 32-bit x. + * This function is only intended to be used as fallback for + * rustsecp256k1_v0_10_0_ctz32_var, but permits it to be tested separately. */ +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_ctz32_var_debruijn(uint32_t x) { + static const uint8_t debruijn[32] = { + 0x00, 0x01, 0x02, 0x18, 0x03, 0x13, 0x06, 0x19, 0x16, 0x04, 0x14, 0x0A, + 0x10, 0x07, 0x0C, 0x1A, 0x1F, 0x17, 0x12, 0x05, 0x15, 0x09, 0x0F, 0x0B, + 0x1E, 0x11, 0x08, 0x0E, 0x1D, 0x0D, 0x1C, 0x1B + }; + return debruijn[(uint32_t)((x & -x) * 0x04D7651FU) >> 27]; +} + +/* Determine the number of trailing zero bits in a (non-zero) 64-bit x. + * This function is only intended to be used as fallback for + * rustsecp256k1_v0_10_0_ctz64_var, but permits it to be tested separately. */ +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_ctz64_var_debruijn(uint64_t x) { + static const uint8_t debruijn[64] = { + 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, + 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, + 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10, + 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12 + }; + return debruijn[(uint64_t)((x & -x) * 0x022FDD63CC95386DU) >> 58]; +} + +/* Determine the number of trailing zero bits in a (non-zero) 32-bit x. */ +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_ctz32_var(uint32_t x) { + VERIFY_CHECK(x != 0); +#if (__has_builtin(__builtin_ctz) || SECP256K1_GNUC_PREREQ(3,4)) + /* If the unsigned type is sufficient to represent the largest uint32_t, consider __builtin_ctz. */ + if (((unsigned)UINT32_MAX) == UINT32_MAX) { + return __builtin_ctz(x); + } +#endif +#if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4)) + /* Otherwise consider __builtin_ctzl (the unsigned long type is always at least 32 bits). */ + return __builtin_ctzl(x); +#else + /* If no suitable CTZ builtin is available, use a (variable time) software emulation. */ + return rustsecp256k1_v0_10_0_ctz32_var_debruijn(x); +#endif +} + +/* Determine the number of trailing zero bits in a (non-zero) 64-bit x. */ +static SECP256K1_INLINE int rustsecp256k1_v0_10_0_ctz64_var(uint64_t x) { + VERIFY_CHECK(x != 0); +#if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4)) + /* If the unsigned long type is sufficient to represent the largest uint64_t, consider __builtin_ctzl. */ + if (((unsigned long)UINT64_MAX) == UINT64_MAX) { + return __builtin_ctzl(x); + } +#endif +#if (__has_builtin(__builtin_ctzll) || SECP256K1_GNUC_PREREQ(3,4)) + /* Otherwise consider __builtin_ctzll (the unsigned long long type is always at least 64 bits). */ + return __builtin_ctzll(x); +#else + /* If no suitable CTZ builtin is available, use a (variable time) software emulation. 
*/ + return rustsecp256k1_v0_10_0_ctz64_var_debruijn(x); +#endif +} + +/* Read a uint32_t in big endian */ +SECP256K1_INLINE static uint32_t rustsecp256k1_v0_10_0_read_be32(const unsigned char* p) { + return (uint32_t)p[0] << 24 | + (uint32_t)p[1] << 16 | + (uint32_t)p[2] << 8 | + (uint32_t)p[3]; +} + +/* Write a uint32_t in big endian */ +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_write_be32(unsigned char* p, uint32_t x) { + p[3] = x; + p[2] = x >> 8; + p[1] = x >> 16; + p[0] = x >> 24; +} + +/* Read a uint64_t in big endian */ +SECP256K1_INLINE static uint64_t rustsecp256k1_v0_10_0_read_be64(const unsigned char* p) { + return (uint64_t)p[0] << 56 | + (uint64_t)p[1] << 48 | + (uint64_t)p[2] << 40 | + (uint64_t)p[3] << 32 | + (uint64_t)p[4] << 24 | + (uint64_t)p[5] << 16 | + (uint64_t)p[6] << 8 | + (uint64_t)p[7]; +} + +/* Write a uint64_t in big endian */ +SECP256K1_INLINE static void rustsecp256k1_v0_10_0_write_be64(unsigned char* p, uint64_t x) { + p[7] = x; + p[6] = x >> 8; + p[5] = x >> 16; + p[4] = x >> 24; + p[3] = x >> 32; + p[2] = x >> 40; + p[1] = x >> 48; + p[0] = x >> 56; +} + +#endif /* SECP256K1_UTIL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/wycheproof/WYCHEPROOF_COPYING b/secp256k1-sys/depend/secp256k1/src/wycheproof/WYCHEPROOF_COPYING index 0fd99ee8f..71db1983e 100644 --- a/secp256k1-sys/depend/secp256k1/src/wycheproof/WYCHEPROOF_COPYING +++ b/secp256k1-sys/depend/secp256k1/src/wycheproof/WYCHEPROOF_COPYING @@ -1,10 +1,10 @@ -* The file `ecdsa_rustsecp256k1_v0_9_2_sha256_bitcoin_test.json` in this directory +* The file `ecdsa_rustsecp256k1_v0_10_0_sha256_bitcoin_test.json` in this directory comes from Google's project Wycheproof with git commit `b063b4aedae951c69df014cd25fa6d69ae9e8cb9`, see - https://github.com/google/wycheproof/blob/b063b4aedae951c69df014cd25fa6d69ae9e8cb9/testvectors_v1/ecdsa_rustsecp256k1_v0_9_2_sha256_bitcoin_test.json + https://github.com/google/wycheproof/blob/b063b4aedae951c69df014cd25fa6d69ae9e8cb9/testvectors_v1/ecdsa_rustsecp256k1_v0_10_0_sha256_bitcoin_test.json -* The file `ecdsa_rustsecp256k1_v0_9_2_sha256_bitcoin_test.h` is generated from - `ecdsa_rustsecp256k1_v0_9_2_sha256_bitcoin_test.json` using the script +* The file `ecdsa_rustsecp256k1_v0_10_0_sha256_bitcoin_test.h` is generated from + `ecdsa_rustsecp256k1_v0_10_0_sha256_bitcoin_test.json` using the script `tests_wycheproof_generate.py`. ------------------------------------------------------------------------------- diff --git a/secp256k1-sys/depend/secp256k1/tools/check-abi.sh b/secp256k1-sys/depend/secp256k1/tools/check-abi.sh new file mode 100755 index 000000000..8f6119cd8 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/tools/check-abi.sh @@ -0,0 +1,64 @@ +#!/bin/sh + +set -eu + +default_base_version="$(git describe --match "v*.*.*" --abbrev=0)" +default_new_version="master" + +display_help_and_exit() { + echo "Usage: $0 [<base_ver> <new_ver>]" + echo "" + echo "Description: This script uses the ABI Compliance Checker tool to determine if the ABI" + echo " of a new version of libsecp256k1 has changed in a backward-incompatible way."
+ echo "" + echo "Options:" + echo " base_ver Specify the base version (default: $default_base_version)" + echo " new_ver Specify the new version (default: $default_new_version)" + echo " -h, --help Display this help message" + exit 0 +} + +if [ "$#" -eq 0 ]; then + base_version="$default_base_version" + new_version="$default_new_version" +elif [ "$#" -eq 1 ] && { [ "$1" = "-h" ] || [ "$1" = "--help" ]; }; then + display_help_and_exit +elif [ "$#" -eq 2 ]; then + base_version="$1" + new_version="$2" +else + echo "Invalid usage. See help:" + echo "" + display_help_and_exit +fi + +checkout_and_build() { + git worktree add -d "$1" "$2" + cd "$1" + mkdir build && cd build + cmake -S .. --preset dev-mode \ + -DCMAKE_C_COMPILER=gcc -DCMAKE_BUILD_TYPE=None -DCMAKE_C_FLAGS="-g -Og -gdwarf-4" \ + -DSECP256K1_BUILD_BENCHMARK=OFF \ + -DSECP256K1_BUILD_TESTS=OFF \ + -DSECP256K1_BUILD_EXHAUSTIVE_TESTS=OFF \ + -DSECP256K1_BUILD_CTIME_TESTS=OFF \ + -DSECP256K1_BUILD_EXAMPLES=OFF + cmake --build . -j "$(nproc)" + abi-dumper src/libsecp256k1.so -o ABI.dump -lver "$2" +} + +echo "Comparing $base_version (base version) to $new_version (new version)" +echo + +original_dir="$(pwd)" + +base_source_dir=$(mktemp -d) +checkout_and_build "$base_source_dir" "$base_version" + +new_source_dir=$(mktemp -d) +checkout_and_build "$new_source_dir" "$new_version" + +cd "$original_dir" +abi-compliance-checker -lib libsecp256k1 -old "${base_source_dir}/build/ABI.dump" -new "${new_source_dir}/build/ABI.dump" +git worktree remove "$base_source_dir" +git worktree remove "$new_source_dir" diff --git a/secp256k1-sys/src/lib.rs b/secp256k1-sys/src/lib.rs index b31d5d3f9..872545c53 100644 --- a/secp256k1-sys/src/lib.rs +++ b/secp256k1-sys/src/lib.rs @@ -545,110 +545,110 @@ impl_raw_debug!(ElligatorSwift); extern "C" { /// Default ECDH hash function - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdh_hash_function_default")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdh_hash_function_default")] pub static secp256k1_ecdh_hash_function_default: EcdhHashFn; /// Default ECDH hash function for BIP324 key establishment - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ellswift_xdh_hash_function_bip324")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ellswift_xdh_hash_function_bip324")] pub static secp256k1_ellswift_xdh_hash_function_bip324: EllswiftEcdhHashFn; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_nonce_function_rfc6979")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_nonce_function_rfc6979")] pub static secp256k1_nonce_function_rfc6979: NonceFn; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_nonce_function_default")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_nonce_function_default")] pub static secp256k1_nonce_function_default: NonceFn; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_nonce_function_bip340")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_nonce_function_bip340")] pub static secp256k1_nonce_function_bip340: SchnorrNonceFn; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_context_no_precomp")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_context_no_precomp")] pub static 
secp256k1_context_no_precomp: *const Context; // Contexts - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_context_preallocated_destroy")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_context_preallocated_destroy")] pub fn secp256k1_context_preallocated_destroy(cx: NonNull<Context>); // Signatures - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdsa_signature_parse_der")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdsa_signature_parse_der")] pub fn secp256k1_ecdsa_signature_parse_der(cx: *const Context, sig: *mut Signature, input: *const c_uchar, in_len: size_t) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdsa_signature_parse_compact")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdsa_signature_parse_compact")] pub fn secp256k1_ecdsa_signature_parse_compact(cx: *const Context, sig: *mut Signature, input64: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdsa_signature_parse_der_lax")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdsa_signature_parse_der_lax")] pub fn ecdsa_signature_parse_der_lax(cx: *const Context, sig: *mut Signature, input: *const c_uchar, in_len: size_t) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdsa_signature_serialize_der")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdsa_signature_serialize_der")] pub fn secp256k1_ecdsa_signature_serialize_der(cx: *const Context, output: *mut c_uchar, out_len: *mut size_t, sig: *const Signature) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdsa_signature_serialize_compact")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdsa_signature_serialize_compact")] pub fn secp256k1_ecdsa_signature_serialize_compact(cx: *const Context, output64: *mut c_uchar, sig: *const Signature) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdsa_signature_normalize")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdsa_signature_normalize")] pub fn secp256k1_ecdsa_signature_normalize(cx: *const Context, out_sig: *mut Signature, in_sig: *const Signature) -> c_int; // Secret Keys - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ec_seckey_verify")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ec_seckey_verify")] pub fn secp256k1_ec_seckey_verify(cx: *const Context, sk: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ec_seckey_negate")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ec_seckey_negate")] pub fn secp256k1_ec_seckey_negate(cx: *const Context, sk: *mut c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ec_seckey_tweak_add")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ec_seckey_tweak_add")] pub fn secp256k1_ec_seckey_tweak_add(cx: *const Context, sk: *mut c_uchar, tweak: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ec_seckey_tweak_mul")] +
#[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ec_seckey_tweak_mul")] pub fn secp256k1_ec_seckey_tweak_mul(cx: *const Context, sk: *mut c_uchar, tweak: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_keypair_sec")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_keypair_sec")] pub fn secp256k1_keypair_sec(cx: *const Context, output_seckey: *mut c_uchar, keypair: *const Keypair) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_keypair_pub")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_keypair_pub")] pub fn secp256k1_keypair_pub(cx: *const Context, output_pubkey: *mut PublicKey, keypair: *const Keypair) -> c_int; // Elligator Swift - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ellswift_encode")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ellswift_encode")] pub fn secp256k1_ellswift_encode(ctx: *const Context, ell64: *mut c_uchar, pubkey: *const PublicKey, rnd32: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ellswift_decode")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ellswift_decode")] pub fn secp256k1_ellswift_decode(ctx: *const Context, pubkey: *mut u8, ell64: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ellswift_create")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ellswift_create")] pub fn secp256k1_ellswift_create(ctx: *const Context, ell64: *mut c_uchar, seckey32: *const c_uchar, aux_rand32: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ellswift_xdh")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ellswift_xdh")] pub fn secp256k1_ellswift_xdh(ctx: *const Context, output: *mut c_uchar, ell_a64: *const c_uchar, @@ -663,71 +663,71 @@ extern "C" { #[cfg(not(secp256k1_fuzz))] extern "C" { // Contexts - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_context_preallocated_size")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_context_preallocated_size")] pub fn secp256k1_context_preallocated_size(flags: c_uint) -> size_t; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_context_preallocated_create")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_context_preallocated_create")] pub fn secp256k1_context_preallocated_create(prealloc: NonNull, flags: c_uint) -> NonNull; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_context_preallocated_clone_size")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_context_preallocated_clone_size")] pub fn secp256k1_context_preallocated_clone_size(cx: *const Context) -> size_t; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_context_preallocated_clone")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_context_preallocated_clone")] pub fn secp256k1_context_preallocated_clone(cx: *const Context, prealloc: NonNull) -> NonNull; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = 
"rustsecp256k1_v0_9_2_context_randomize")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_context_randomize")] pub fn secp256k1_context_randomize(cx: NonNull, seed32: *const c_uchar) -> c_int; // Pubkeys - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ec_pubkey_parse")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ec_pubkey_parse")] pub fn secp256k1_ec_pubkey_parse(cx: *const Context, pk: *mut PublicKey, input: *const c_uchar, in_len: size_t) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ec_pubkey_serialize")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ec_pubkey_serialize")] pub fn secp256k1_ec_pubkey_serialize(cx: *const Context, output: *mut c_uchar, out_len: *mut size_t, pk: *const PublicKey, compressed: c_uint) -> c_int; // EC - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ec_pubkey_create")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ec_pubkey_create")] pub fn secp256k1_ec_pubkey_create(cx: *const Context, pk: *mut PublicKey, sk: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ec_pubkey_negate")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ec_pubkey_negate")] pub fn secp256k1_ec_pubkey_negate(cx: *const Context, pk: *mut PublicKey) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ec_pubkey_cmp")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ec_pubkey_cmp")] pub fn secp256k1_ec_pubkey_cmp(cx: *const Context, pubkey1: *const PublicKey, pubkey2: *const PublicKey) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ec_pubkey_tweak_add")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ec_pubkey_tweak_add")] pub fn secp256k1_ec_pubkey_tweak_add(cx: *const Context, pk: *mut PublicKey, tweak: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ec_pubkey_tweak_mul")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ec_pubkey_tweak_mul")] pub fn secp256k1_ec_pubkey_tweak_mul(cx: *const Context, pk: *mut PublicKey, tweak: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ec_pubkey_combine")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ec_pubkey_combine")] pub fn secp256k1_ec_pubkey_combine(cx: *const Context, out: *mut PublicKey, ins: *const *const PublicKey, n: size_t) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdh")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdh")] pub fn secp256k1_ecdh( cx: *const Context, output: *mut c_uchar, @@ -738,14 +738,14 @@ extern "C" { ) -> c_int; // ECDSA - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdsa_verify")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdsa_verify")] pub fn secp256k1_ecdsa_verify(cx: *const Context, sig: *const Signature, msg32: *const c_uchar, pk: *const PublicKey) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdsa_sign")] + 
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdsa_sign")]
     pub fn secp256k1_ecdsa_sign(cx: *const Context,
                                 sig: *mut Signature,
                                 msg32: *const c_uchar,
@@ -755,7 +755,7 @@ extern "C" {
                                 -> c_int;
 
     // Schnorr Signatures
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_schnorrsig_sign")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_schnorrsig_sign")]
     pub fn secp256k1_schnorrsig_sign(
         cx: *const Context,
         sig: *mut c_uchar,
@@ -765,7 +765,7 @@ extern "C" {
     ) -> c_int;
 
     // Schnorr Signatures with extra parameters (see [`SchnorrSigExtraParams`])
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_schnorrsig_sign_custom")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_schnorrsig_sign_custom")]
     pub fn secp256k1_schnorrsig_sign_custom(
         cx: *const Context,
         sig: *mut c_uchar,
@@ -775,7 +775,7 @@ extern "C" {
         extra_params: *const SchnorrSigExtraParams,
     ) -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_schnorrsig_verify")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_schnorrsig_verify")]
     pub fn secp256k1_schnorrsig_verify(
         cx: *const Context,
         sig64: *const c_uchar,
@@ -785,28 +785,28 @@ extern "C" {
     ) -> c_int;
 
     // Extra keys
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_keypair_create")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_keypair_create")]
     pub fn secp256k1_keypair_create(
         cx: *const Context,
         keypair: *mut Keypair,
         seckey: *const c_uchar,
     ) -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_xonly_pubkey_parse")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_xonly_pubkey_parse")]
     pub fn secp256k1_xonly_pubkey_parse(
         cx: *const Context,
         pubkey: *mut XOnlyPublicKey,
         input32: *const c_uchar,
     ) -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_xonly_pubkey_serialize")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_xonly_pubkey_serialize")]
     pub fn secp256k1_xonly_pubkey_serialize(
         cx: *const Context,
         output32: *mut c_uchar,
         pubkey: *const XOnlyPublicKey,
     ) -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_xonly_pubkey_from_pubkey")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_xonly_pubkey_from_pubkey")]
     pub fn secp256k1_xonly_pubkey_from_pubkey(
         cx: *const Context,
         xonly_pubkey: *mut XOnlyPublicKey,
@@ -814,14 +814,14 @@ extern "C" {
         pubkey: *const PublicKey,
     ) -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_xonly_pubkey_cmp")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_xonly_pubkey_cmp")]
     pub fn secp256k1_xonly_pubkey_cmp(
         cx: *const Context,
         pubkey1: *const XOnlyPublicKey,
         pubkey2: *const XOnlyPublicKey
     ) -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add")]
     pub fn secp256k1_xonly_pubkey_tweak_add(
         cx: *const Context,
         output_pubkey: *mut PublicKey,
@@ -829,7 +829,7 @@ extern "C" {
         tweak32: *const c_uchar,
     ) -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_keypair_xonly_pub")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_keypair_xonly_pub")]
     pub fn secp256k1_keypair_xonly_pub(
         cx: *const Context,
         pubkey: *mut XOnlyPublicKey,
@@ -837,14 +837,14 @@ extern "C" {
         keypair: *const Keypair
     ) -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_keypair_xonly_tweak_add")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_keypair_xonly_tweak_add")]
    pub fn secp256k1_keypair_xonly_tweak_add(
         cx: *const Context,
         keypair: *mut Keypair,
         tweak32: *const c_uchar,
     ) -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_xonly_pubkey_tweak_add_check")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_xonly_pubkey_tweak_add_check")]
     pub fn secp256k1_xonly_pubkey_tweak_add_check(
         cx: *const Context,
         tweaked_pubkey32: *const c_uchar,
@@ -871,7 +871,7 @@ extern "C" {
 /// The newly created secp256k1 raw context.
 #[cfg(all(feature = "alloc", not(rust_secp_no_symbol_renaming)))]
 pub unsafe fn secp256k1_context_create(flags: c_uint) -> NonNull<Context> {
-    rustsecp256k1_v0_9_2_context_create(flags)
+    rustsecp256k1_v0_10_0_context_create(flags)
 }
 
 /// A reimplementation of the C function `secp256k1_context_create` in rust.
@@ -880,7 +880,7 @@ pub unsafe fn secp256k1_context_create(flags: c_uint) -> NonNull<Context> {
 #[no_mangle]
 #[allow(clippy::missing_safety_doc)] // Documented above.
 #[cfg(all(feature = "alloc", not(rust_secp_no_symbol_renaming)))]
-pub unsafe extern "C" fn rustsecp256k1_v0_9_2_context_create(flags: c_uint) -> NonNull<Context> {
+pub unsafe extern "C" fn rustsecp256k1_v0_10_0_context_create(flags: c_uint) -> NonNull<Context> {
     use core::mem;
     use crate::alloc::alloc;
     assert!(ALIGN_TO >= mem::align_of::<usize>());
@@ -913,13 +913,13 @@ pub unsafe extern "C" fn rustsecp256k1_v0_9_2_context_create(flags: c_uint) -> NonNull<Context> {
 /// `ctx` must be a valid pointer to a block of memory created using [`secp256k1_context_create`].
 #[cfg(all(feature = "alloc", not(rust_secp_no_symbol_renaming)))]
 pub unsafe fn secp256k1_context_destroy(ctx: NonNull<Context>) {
-    rustsecp256k1_v0_9_2_context_destroy(ctx)
+    rustsecp256k1_v0_10_0_context_destroy(ctx)
 }
 
 #[no_mangle]
 #[allow(clippy::missing_safety_doc)] // Documented above.
 #[cfg(all(feature = "alloc", not(rust_secp_no_symbol_renaming)))]
-pub unsafe extern "C" fn rustsecp256k1_v0_9_2_context_destroy(mut ctx: NonNull<Context>) {
+pub unsafe extern "C" fn rustsecp256k1_v0_10_0_context_destroy(mut ctx: NonNull<Context>) {
     use crate::alloc::alloc;
     secp256k1_context_preallocated_destroy(ctx);
     let ctx: *mut Context = ctx.as_mut();
@@ -955,7 +955,7 @@ pub unsafe extern "C" fn rustsecp256k1_v0_9_2_context_destroy(mut ctx: NonNull<Context>) {
 extern "C" {
-    fn rustsecp256k1_v0_9_2_context_preallocated_size(flags: c_uint) -> size_t;
-    fn rustsecp256k1_v0_9_2_context_preallocated_create(prealloc: NonNull<c_void>, flags: c_uint) -> NonNull<Context>;
-    fn rustsecp256k1_v0_9_2_context_preallocated_clone(cx: *const Context, prealloc: NonNull<c_void>) -> NonNull<Context>;
+    fn rustsecp256k1_v0_10_0_context_preallocated_size(flags: c_uint) -> size_t;
+    fn rustsecp256k1_v0_10_0_context_preallocated_create(prealloc: NonNull<c_void>, flags: c_uint) -> NonNull<Context>;
+    fn rustsecp256k1_v0_10_0_context_preallocated_clone(cx: *const Context, prealloc: NonNull<c_void>) -> NonNull<Context>;
 }
 
 #[cfg(feature = "lowmemory")]
@@ -1103,7 +1103,7 @@ mod fuzz_dummy {
     const CTX_SIZE: usize = 1024 * (1024 + 128);
     // Contexts
     pub unsafe fn secp256k1_context_preallocated_size(flags: c_uint) -> size_t {
-        assert!(rustsecp256k1_v0_9_2_context_preallocated_size(flags) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
+        assert!(rustsecp256k1_v0_10_0_context_preallocated_size(flags) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
         CTX_SIZE
     }
 
@@ -1123,8 +1123,8 @@ mod fuzz_dummy {
         if have_ctx == HAVE_CONTEXT_NONE {
             have_ctx = HAVE_PREALLOCATED_CONTEXT.swap(HAVE_CONTEXT_WORKING, Ordering::AcqRel);
             if have_ctx == HAVE_CONTEXT_NONE {
-                assert!(rustsecp256k1_v0_9_2_context_preallocated_size(SECP256K1_START_SIGN | SECP256K1_START_VERIFY) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
-                assert_eq!(rustsecp256k1_v0_9_2_context_preallocated_create(
+                assert!(rustsecp256k1_v0_10_0_context_preallocated_size(SECP256K1_START_SIGN | SECP256K1_START_VERIFY) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
+                assert_eq!(rustsecp256k1_v0_10_0_context_preallocated_create(
                     NonNull::new_unchecked(PREALLOCATED_CONTEXT[..].as_mut_ptr() as *mut c_void),
                     SECP256K1_START_SIGN | SECP256K1_START_VERIFY),
                     NonNull::new_unchecked(PREALLOCATED_CONTEXT[..].as_mut_ptr() as *mut Context));
@@ -1153,7 +1153,7 @@ mod fuzz_dummy {
         let new_ptr = (prealloc.as_ptr() as *mut u8).add(CTX_SIZE).sub(std::mem::size_of::<c_uint>());
         let flags = (orig_ptr as *mut c_uint).read();
         (new_ptr as *mut c_uint).write(flags);
-        rustsecp256k1_v0_9_2_context_preallocated_clone(cx, prealloc)
+        rustsecp256k1_v0_10_0_context_preallocated_clone(cx, prealloc)
     }
 
     pub unsafe fn secp256k1_context_randomize(cx: NonNull<Context>,
diff --git a/secp256k1-sys/src/recovery.rs b/secp256k1-sys/src/recovery.rs
index fc9ce2db3..d17e5fbab 100644
--- a/secp256k1-sys/src/recovery.rs
+++ b/secp256k1-sys/src/recovery.rs
@@ -100,17 +100,17 @@ impl core::hash::Hash for RecoverableSignature {
 }
 
 extern "C" {
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_parse_compact")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_parse_compact")]
     pub fn secp256k1_ecdsa_recoverable_signature_parse_compact(cx: *const Context, sig: *mut RecoverableSignature,
                                                                input64: *const c_uchar, recid: c_int)
                                                                -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_serialize_compact")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_serialize_compact")]
     pub fn secp256k1_ecdsa_recoverable_signature_serialize_compact(cx: *const Context, output64: *mut c_uchar,
                                                                    recid: *mut c_int, sig: *const RecoverableSignature)
                                                                    -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdsa_recoverable_signature_convert")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdsa_recoverable_signature_convert")]
     pub fn secp256k1_ecdsa_recoverable_signature_convert(cx: *const Context, sig: *mut Signature,
                                                          input: *const RecoverableSignature)
                                                          -> c_int;
@@ -118,7 +118,7 @@ extern "C" {
 
 #[cfg(not(secp256k1_fuzz))]
 extern "C" {
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdsa_sign_recoverable")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdsa_sign_recoverable")]
     pub fn secp256k1_ecdsa_sign_recoverable(cx: *const Context,
                                             sig: *mut RecoverableSignature,
                                             msg32: *const c_uchar,
@@ -127,7 +127,7 @@ extern "C" {
                                             noncedata: *const c_void)
                                             -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_9_2_ecdsa_recover")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_10_0_ecdsa_recover")]
     pub fn secp256k1_ecdsa_recover(cx: *const Context,
                                    pk: *mut PublicKey,
                                    sig: *const RecoverableSignature,